From a15305e60e1a1d05b1e3b8525d43438f20b00238 Mon Sep 17 00:00:00 2001
From: PR Bot
Date: Fri, 13 Mar 2026 07:17:43 +0800
Subject: [PATCH 1/4] feat: add MiniMax as a new LLM provider

Add MiniMax (https://platform.minimax.io) as a new LLM provider with
OpenAI-compatible API support.

Changes:
- Add MiniMax LLM provider class extending OpenAI with temperature
  clamping (must be in (0, 1]) and response_format removal
- Register provider in LLMClasses, openai-adapters, and config-types
- Add model info for MiniMax-M2.5 and MiniMax-M2.5-highspeed
  (204K context, 192K max output)
- Add GUI model selection entries and provider configuration
- Add provider documentation page
---
 core/llm/llms/MiniMax.ts                    | 40 ++++++++++++++
 core/llm/llms/index.ts                      |  2 +
 .../model-providers/more/minimax.mdx        | 53 +++++++++++++++++++
 docs/customize/model-providers/overview.mdx |  1 +
 gui/src/pages/AddNewModel/configs/models.ts | 24 +++++++++
 .../pages/AddNewModel/configs/providers.ts  | 30 +++++++++++
 packages/config-types/src/index.ts          |  1 +
 packages/llm-info/src/index.ts              |  2 +
 packages/llm-info/src/providers/minimax.ts  | 28 ++++++++++
 packages/openai-adapters/src/index.ts       |  2 +
 10 files changed, 183 insertions(+)
 create mode 100644 core/llm/llms/MiniMax.ts
 create mode 100644 docs/customize/model-providers/more/minimax.mdx
 create mode 100644 packages/llm-info/src/providers/minimax.ts

diff --git a/core/llm/llms/MiniMax.ts b/core/llm/llms/MiniMax.ts
new file mode 100644
index 00000000000..c96200a53ca
--- /dev/null
+++ b/core/llm/llms/MiniMax.ts
@@ -0,0 +1,40 @@
+import { ChatMessage, CompletionOptions, LLMOptions } from "../../index.js";
+
+import OpenAI from "./OpenAI.js";
+
+class MiniMax extends OpenAI {
+  static providerName = "minimax";
+  static defaultOptions: Partial<LLMOptions> = {
+    apiBase: "https://api.minimax.io/v1/",
+    model: "MiniMax-M2.5",
+    useLegacyCompletionsEndpoint: false,
+  };
+
+  protected _convertArgs(
+    options: CompletionOptions,
+    messages: ChatMessage[],
+  ) {
+    const finalOptions = 
super._convertArgs(options, messages);
+
+    // MiniMax requires temperature in (0.0, 1.0] — zero is rejected
+    if (
+      finalOptions.temperature !== undefined &&
+      finalOptions.temperature !== null
+    ) {
+      if (finalOptions.temperature <= 0) {
+        finalOptions.temperature = 0.01;
+      } else if (finalOptions.temperature > 1) {
+        finalOptions.temperature = 1.0;
+      }
+    }
+
+    // MiniMax does not support response_format
+    if ((finalOptions as any).response_format) {
+      delete (finalOptions as any).response_format;
+    }
+
+    return finalOptions;
+  }
+}
+
+export default MiniMax;
diff --git a/core/llm/llms/index.ts b/core/llm/llms/index.ts
index 04f58e393de..a0711f1fbdd 100644
--- a/core/llm/llms/index.ts
+++ b/core/llm/llms/index.ts
@@ -38,6 +38,7 @@ import Lemonade from "./Lemonade";
 import LMStudio from "./LMStudio";
 import Mistral from "./Mistral";
 import Mimo from "./Mimo";
+import MiniMax from "./MiniMax";
 import MockLLM from "./Mock";
 import Moonshot from "./Moonshot";
 import Msty from "./Msty";
@@ -92,6 +93,7 @@ export const LLMClasses = [
   LMStudio,
   Mistral,
   Mimo,
+  MiniMax,
   Bedrock,
   BedrockImport,
   SageMaker,
diff --git a/docs/customize/model-providers/more/minimax.mdx b/docs/customize/model-providers/more/minimax.mdx
new file mode 100644
index 00000000000..4b47dd96585
--- /dev/null
+++ b/docs/customize/model-providers/more/minimax.mdx
@@ -0,0 +1,53 @@
+---
+title: "How to Configure MiniMax with Continue"
+sidebarTitle: "MiniMax"
+---
+
+<Tip>
+  Get your API key from the [MiniMax Platform](https://platform.minimax.io)
+</Tip>
+
+## Configuration
+
+<Tabs>
+  <Tab title="YAML">
+    ```yaml title="config.yaml"
+    name: My Config
+    version: 0.0.1
+    schema: v1
+
+    models:
+      - name: MiniMax M2.5
+        provider: minimax
+        model: MiniMax-M2.5
+        apiKey: <YOUR_MINIMAX_API_KEY>
+    ```
+  </Tab>
+  <Tab title="JSON">
+    ```json title="config.json"
+    {
+      "models": [
+        {
+          "title": "MiniMax M2.5",
+          "provider": "minimax",
+          "model": "MiniMax-M2.5",
+          "apiKey": "<YOUR_MINIMAX_API_KEY>"
+        }
+      ]
+    }
+    ```
+  </Tab>
+</Tabs>
+
+## Available Models
+
+| Model | Description |
+| :---- | :---------- |
+| `MiniMax-M2.5` | Peak 
performance with ultimate value. 204K context window. | +| `MiniMax-M2.5-highspeed` | Same performance, faster and more agile. 204K context window. | + +## Notes + +- MiniMax uses an OpenAI-compatible API at `https://api.minimax.io/v1` +- Set the `MINIMAX_API_KEY` environment variable or configure `apiKey` in your config +- For users in China, set `apiBase` to `https://api.minimaxi.com/v1/` diff --git a/docs/customize/model-providers/overview.mdx b/docs/customize/model-providers/overview.mdx index 4bbf1c0dc60..fcd99be669a 100644 --- a/docs/customize/model-providers/overview.mdx +++ b/docs/customize/model-providers/overview.mdx @@ -38,6 +38,7 @@ Beyond the top-level providers, Continue supports many other options: | [Cohere](/customize/model-providers/more/cohere) | Models specialized for semantic search and text generation | | [NVIDIA](/customize/model-providers/more/nvidia) | GPU-accelerated model hosting | | [Cloudflare](/customize/model-providers/more/cloudflare) | Edge-based AI inference services | +| [MiniMax](/customize/model-providers/more/minimax) | High-performance models with 200K+ context window | ### Local Model Options diff --git a/gui/src/pages/AddNewModel/configs/models.ts b/gui/src/pages/AddNewModel/configs/models.ts index d15db3f3d1d..52028bd965a 100644 --- a/gui/src/pages/AddNewModel/configs/models.ts +++ b/gui/src/pages/AddNewModel/configs/models.ts @@ -2688,6 +2688,30 @@ export const models: { [key: string]: ModelPackage } = { providerOptions: ["sambanova"], isOpenSource: true, }, + minimaxM25: { + title: "MiniMax M2.5", + description: + "Peak performance with ultimate value. 
Excels at complex reasoning, code generation, and multi-step tasks with a 204K context window.", + params: { + title: "MiniMax M2.5", + model: "MiniMax-M2.5", + contextLength: 204_800, + }, + providerOptions: ["minimax"], + isOpenSource: false, + }, + minimaxM25Highspeed: { + title: "MiniMax M2.5 Highspeed", + description: + "Same performance as M2.5, faster and more agile for latency-sensitive tasks with a 204K context window.", + params: { + title: "MiniMax M2.5 Highspeed", + model: "MiniMax-M2.5-highspeed", + contextLength: 204_800, + }, + providerOptions: ["minimax"], + isOpenSource: false, + }, AUTODETECT: { title: "Autodetect", description: diff --git a/gui/src/pages/AddNewModel/configs/providers.ts b/gui/src/pages/AddNewModel/configs/providers.ts index 5dfb7220b14..348052e7dc0 100644 --- a/gui/src/pages/AddNewModel/configs/providers.ts +++ b/gui/src/pages/AddNewModel/configs/providers.ts @@ -579,6 +579,36 @@ Select the \`GPT-4o\` model below to complete your provider configuration, but n ], apiKeyUrl: "https://console.groq.com/keys", }, + minimax: { + title: "MiniMax", + provider: "minimax", + description: + "MiniMax offers high-performance models with 200K+ context windows at competitive pricing.", + longDescription: + "To get started with MiniMax, obtain an API key from the [MiniMax Platform](https://platform.minimax.io).", + tags: [ModelProviderTags.RequiresApiKey], + collectInputFor: [ + { + inputType: "text", + key: "apiKey", + label: "API Key", + placeholder: "Enter your MiniMax API key", + required: true, + }, + ], + packages: [ + models.minimaxM25, + models.minimaxM25Highspeed, + { + ...models.AUTODETECT, + params: { + ...models.AUTODETECT.params, + title: "MiniMax", + }, + }, + ], + apiKeyUrl: "https://platform.minimax.io", + }, deepseek: { title: "DeepSeek", provider: "deepseek", diff --git a/packages/config-types/src/index.ts b/packages/config-types/src/index.ts index a22bb00bb0b..8561500e662 100644 --- a/packages/config-types/src/index.ts +++ 
b/packages/config-types/src/index.ts @@ -61,6 +61,7 @@ export const modelDescriptionSchema = z.object({ "nebius", "scaleway", "watsonx", + "minimax", ]), model: z.string(), apiKey: z.string().optional(), diff --git a/packages/llm-info/src/index.ts b/packages/llm-info/src/index.ts index 52ca9f211cb..066b39c0ce2 100644 --- a/packages/llm-info/src/index.ts +++ b/packages/llm-info/src/index.ts @@ -4,6 +4,7 @@ import { Bedrock } from "./providers/bedrock.js"; import { Cohere } from "./providers/cohere.js"; import { CometAPI } from "./providers/cometapi.js"; import { Gemini } from "./providers/gemini.js"; +import { MiniMax } from "./providers/minimax.js"; import { Mistral } from "./providers/mistral.js"; import { Ollama } from "./providers/ollama.js"; import { OpenAi } from "./providers/openai.js"; @@ -25,6 +26,7 @@ export const allModelProviders: ModelProvider[] = [ Bedrock, Cohere, CometAPI, + MiniMax, xAI, zAI, ]; diff --git a/packages/llm-info/src/providers/minimax.ts b/packages/llm-info/src/providers/minimax.ts new file mode 100644 index 00000000000..ee157e8a86e --- /dev/null +++ b/packages/llm-info/src/providers/minimax.ts @@ -0,0 +1,28 @@ +import { ModelProvider } from "../types.js"; + +export const MiniMax: ModelProvider = { + models: [ + { + model: "MiniMax-M2.5", + displayName: "MiniMax M2.5", + contextLength: 204800, + maxCompletionTokens: 192000, + description: + "Peak performance with ultimate value. 
Excels at complex reasoning, code generation, and multi-step tasks.", + regex: /MiniMax-M2\.5$/i, + recommendedFor: ["chat"], + }, + { + model: "MiniMax-M2.5-highspeed", + displayName: "MiniMax M2.5 Highspeed", + contextLength: 204800, + maxCompletionTokens: 192000, + description: + "Same performance as M2.5, faster and more agile for latency-sensitive tasks.", + regex: /MiniMax-M2\.5-highspeed/i, + recommendedFor: ["chat"], + }, + ], + id: "minimax", + displayName: "MiniMax", +}; diff --git a/packages/openai-adapters/src/index.ts b/packages/openai-adapters/src/index.ts index a00b95b81f7..50c937946b5 100644 --- a/packages/openai-adapters/src/index.ts +++ b/packages/openai-adapters/src/index.ts @@ -141,6 +141,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined { return openAICompatible("http://localhost:8000/v1/", config); case "groq": return openAICompatible("https://api.groq.com/openai/v1/", config); + case "minimax": + return openAICompatible("https://api.minimax.io/v1/", config); case "sambanova": return openAICompatible("https://api.sambanova.ai/v1/", config); case "text-gen-webui": From 8104b5b5175e879d01056680fc33466dad08539c Mon Sep 17 00:00:00 2001 From: octo-patch Date: Sat, 14 Mar 2026 14:16:54 +0800 Subject: [PATCH 2/4] fix: use dedicated MiniMaxApi adapter instead of generic OpenAIApi The minimax provider was wired to the generic OpenAIApi via openAICompatible(), which skips MiniMax-specific request fixes. This creates a dedicated MiniMaxApi adapter class that overrides modifyChatBody to apply temperature clamping (MiniMax requires temperature in (0.0, 1.0]) and response_format removal, matching the adaptations already present in core/llm/llms/MiniMax.ts. 
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
---
 packages/openai-adapters/src/apis/MiniMax.ts | 37 ++++++++++++++++++++
 packages/openai-adapters/src/index.ts        |  3 +-
 packages/openai-adapters/src/types.ts        |  6 ++++
 3 files changed, 45 insertions(+), 1 deletion(-)
 create mode 100644 packages/openai-adapters/src/apis/MiniMax.ts

diff --git a/packages/openai-adapters/src/apis/MiniMax.ts b/packages/openai-adapters/src/apis/MiniMax.ts
new file mode 100644
index 00000000000..e13f65dcf3d
--- /dev/null
+++ b/packages/openai-adapters/src/apis/MiniMax.ts
@@ -0,0 +1,37 @@
+import {
+  ChatCompletionCreateParams,
+} from "openai/resources/index";
+import { MiniMaxConfig } from "../types.js";
+import { OpenAIApi } from "./OpenAI.js";
+
+export const MINIMAX_API_BASE = "https://api.minimax.io/v1/";
+
+export class MiniMaxApi extends OpenAIApi {
+  constructor(config: MiniMaxConfig) {
+    super({
+      ...config,
+      provider: "openai",
+      apiBase: config.apiBase ?? MINIMAX_API_BASE,
+    });
+  }
+
+  modifyChatBody<T extends ChatCompletionCreateParams>(body: T): T {
+    body = super.modifyChatBody(body);
+
+    // MiniMax requires temperature in (0.0, 1.0] — zero is rejected
+    if (body.temperature !== undefined && body.temperature !== null) {
+      if (body.temperature <= 0) {
+        body.temperature = 0.01;
+      } else if (body.temperature > 1) {
+        body.temperature = 1.0;
+      }
+    }
+
+    // MiniMax does not support response_format
+    if ((body as any).response_format) {
+      delete (body as any).response_format;
+    }
+
+    return body;
+  }
+}
diff --git a/packages/openai-adapters/src/index.ts b/packages/openai-adapters/src/index.ts
index 50c937946b5..760f92ef7e4 100644
--- a/packages/openai-adapters/src/index.ts
+++ b/packages/openai-adapters/src/index.ts
@@ -13,6 +13,7 @@ import { GeminiApi } from "./apis/Gemini.js";
 import { InceptionApi } from "./apis/Inception.js";
 import { JinaApi } from "./apis/Jina.js";
 import { LlamastackApi } from "./apis/LlamaStack.js";
+import { MiniMaxApi } from "./apis/MiniMax.js";
 import { MockApi } from "./apis/Mock.js";
 import { MoonshotApi 
} from "./apis/Moonshot.js";
 import { OpenAIApi } from "./apis/OpenAI.js";
@@ -142,7 +143,7 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined {
     case "groq":
       return openAICompatible("https://api.groq.com/openai/v1/", config);
     case "minimax":
-      return openAICompatible("https://api.minimax.io/v1/", config);
+      return new MiniMaxApi(config);
     case "sambanova":
       return openAICompatible("https://api.sambanova.ai/v1/", config);
     case "text-gen-webui":
diff --git a/packages/openai-adapters/src/types.ts b/packages/openai-adapters/src/types.ts
index 868a6e8dfe9..443036754cf 100644
--- a/packages/openai-adapters/src/types.ts
+++ b/packages/openai-adapters/src/types.ts
@@ -74,6 +74,11 @@ export const DeepseekConfigSchema = OpenAIConfigSchema.extend({
 });
 export type DeepseekConfig = z.infer<typeof DeepseekConfigSchema>;
 
+export const MiniMaxConfigSchema = OpenAIConfigSchema.extend({
+  provider: z.literal("minimax"),
+});
+export type MiniMaxConfig = z.infer<typeof MiniMaxConfigSchema>;
+
 export const BedrockConfigSchema = OpenAIConfigSchema.extend({
   provider: z.literal("bedrock"),
   // cacheBehavior: z.object({
@@ -263,6 +268,7 @@ export const LLMConfigSchema = z.discriminatedUnion("provider", [
   BedrockConfigSchema,
   MoonshotConfigSchema,
   DeepseekConfigSchema,
+  MiniMaxConfigSchema,
   CohereConfigSchema,
   AzureConfigSchema,
   GeminiConfigSchema,

From 166673aa1401bfb5a3aa08f6de81ea16275ea6fa Mon Sep 17 00:00:00 2001
From: Octopus
Date: Wed, 18 Mar 2026 06:39:02 -0500
Subject: [PATCH 3/4] feat: upgrade MiniMax default model to M2.7

- Add MiniMax-M2.7 and MiniMax-M2.7-highspeed to model list
- Set MiniMax-M2.7 as default model
- Keep all previous models (M2.5, M2.5-highspeed) as alternatives
- Update docs and GUI model selection
---
 core/llm/llms/MiniMax.ts                    |  2 +-
 .../model-providers/more/minimax.mdx        | 10 ++++----
 gui/src/pages/AddNewModel/configs/models.ts | 24 +++++++++++++++
 .../pages/AddNewModel/configs/providers.ts  |  2 ++
 packages/llm-info/src/providers/minimax.ts  | 20 ++++++++++++++++
 5 files changed, 53 
insertions(+), 5 deletions(-)

diff --git a/core/llm/llms/MiniMax.ts b/core/llm/llms/MiniMax.ts
index c96200a53ca..a40078a543b 100644
--- a/core/llm/llms/MiniMax.ts
+++ b/core/llm/llms/MiniMax.ts
@@ -6,7 +6,7 @@ class MiniMax extends OpenAI {
   static providerName = "minimax";
   static defaultOptions: Partial<LLMOptions> = {
     apiBase: "https://api.minimax.io/v1/",
-    model: "MiniMax-M2.5",
+    model: "MiniMax-M2.7",
     useLegacyCompletionsEndpoint: false,
   };
 
diff --git a/docs/customize/model-providers/more/minimax.mdx b/docs/customize/model-providers/more/minimax.mdx
index 4b47dd96585..e0a56e2cfbc 100644
--- a/docs/customize/model-providers/more/minimax.mdx
+++ b/docs/customize/model-providers/more/minimax.mdx
@@ -17,9 +17,9 @@ sidebarTitle: "MiniMax"
     schema: v1
 
     models:
-      - name: MiniMax M2.5
+      - name: MiniMax M2.7
         provider: minimax
-        model: MiniMax-M2.5
+        model: MiniMax-M2.7
         apiKey: <YOUR_MINIMAX_API_KEY>
     ```
   </Tab>
@@ -28,9 +28,9 @@ sidebarTitle: "MiniMax"
     {
       "models": [
         {
-          "title": "MiniMax M2.5",
+          "title": "MiniMax M2.7",
           "provider": "minimax",
-          "model": "MiniMax-M2.5",
+          "model": "MiniMax-M2.7",
           "apiKey": "<YOUR_MINIMAX_API_KEY>"
         }
       ]
@@ -43,6 +43,8 @@ sidebarTitle: "MiniMax"
 
 | Model | Description |
 | :---- | :---------- |
+| `MiniMax-M2.7` | Latest flagship model with enhanced reasoning and coding. 204K context window. |
+| `MiniMax-M2.7-highspeed` | High-speed version of M2.7 for low-latency scenarios. 204K context window. |
 | `MiniMax-M2.5` | Peak performance with ultimate value. 204K context window. |
 | `MiniMax-M2.5-highspeed` | Same performance, faster and more agile. 204K context window. 
| diff --git a/gui/src/pages/AddNewModel/configs/models.ts b/gui/src/pages/AddNewModel/configs/models.ts index 52028bd965a..594a43ff551 100644 --- a/gui/src/pages/AddNewModel/configs/models.ts +++ b/gui/src/pages/AddNewModel/configs/models.ts @@ -2688,6 +2688,30 @@ export const models: { [key: string]: ModelPackage } = { providerOptions: ["sambanova"], isOpenSource: true, }, + minimaxM27: { + title: "MiniMax M2.7", + description: + "Latest flagship model with enhanced reasoning and coding capabilities. 204K context window.", + params: { + title: "MiniMax M2.7", + model: "MiniMax-M2.7", + contextLength: 204_800, + }, + providerOptions: ["minimax"], + isOpenSource: false, + }, + minimaxM27Highspeed: { + title: "MiniMax M2.7 Highspeed", + description: + "High-speed version of M2.7 for low-latency scenarios. 204K context window.", + params: { + title: "MiniMax M2.7 Highspeed", + model: "MiniMax-M2.7-highspeed", + contextLength: 204_800, + }, + providerOptions: ["minimax"], + isOpenSource: false, + }, minimaxM25: { title: "MiniMax M2.5", description: diff --git a/gui/src/pages/AddNewModel/configs/providers.ts b/gui/src/pages/AddNewModel/configs/providers.ts index 348052e7dc0..5ba3860bfb6 100644 --- a/gui/src/pages/AddNewModel/configs/providers.ts +++ b/gui/src/pages/AddNewModel/configs/providers.ts @@ -597,6 +597,8 @@ Select the \`GPT-4o\` model below to complete your provider configuration, but n }, ], packages: [ + models.minimaxM27, + models.minimaxM27Highspeed, models.minimaxM25, models.minimaxM25Highspeed, { diff --git a/packages/llm-info/src/providers/minimax.ts b/packages/llm-info/src/providers/minimax.ts index ee157e8a86e..e55a5d8fcd7 100644 --- a/packages/llm-info/src/providers/minimax.ts +++ b/packages/llm-info/src/providers/minimax.ts @@ -2,6 +2,26 @@ import { ModelProvider } from "../types.js"; export const MiniMax: ModelProvider = { models: [ + { + model: "MiniMax-M2.7", + displayName: "MiniMax M2.7", + contextLength: 204800, + maxCompletionTokens: 192000, 
+ description: + "Latest flagship model with enhanced reasoning and coding capabilities.", + regex: /MiniMax-M2\.7$/i, + recommendedFor: ["chat"], + }, + { + model: "MiniMax-M2.7-highspeed", + displayName: "MiniMax M2.7 Highspeed", + contextLength: 204800, + maxCompletionTokens: 192000, + description: + "High-speed version of M2.7 for low-latency scenarios.", + regex: /MiniMax-M2\.7-highspeed/i, + recommendedFor: ["chat"], + }, { model: "MiniMax-M2.5", displayName: "MiniMax M2.5", From c0b55011eb2a4cf5532038eb043db2e3a838c058 Mon Sep 17 00:00:00 2001 From: octo-patch Date: Sat, 21 Mar 2026 13:04:25 +0800 Subject: [PATCH 4/4] fix: add minimax to PROVIDER_HANDLES_TEMPLATING and PARALLEL_PROVIDERS MiniMax uses an OpenAI-compatible chat completions API, so it handles templating natively. MiniMax also supports parallel tool calls. Not added to PROVIDER_SUPPORTS_IMAGES since MiniMax M2.7 is text-only. --- core/llm/autodetect.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/llm/autodetect.ts b/core/llm/autodetect.ts index 4085d3798b7..05120f98bd4 100644 --- a/core/llm/autodetect.ts +++ b/core/llm/autodetect.ts @@ -67,6 +67,7 @@ const PROVIDER_HANDLES_TEMPLATING: string[] = [ "openrouter", "deepseek", "xAI", + "minimax", "groq", "gemini", "docker", @@ -248,6 +249,7 @@ const PARALLEL_PROVIDERS: string[] = [ "vertexai", "function-network", "scaleway", + "minimax", ]; function llmCanGenerateInParallel(provider: string, model: string): boolean {