diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts
index d5880cd3b1..3bc4e13454 100644
--- a/packages/types/src/provider-settings.ts
+++ b/packages/types/src/provider-settings.ts
@@ -243,6 +243,7 @@ const openAiSchema = baseProviderSettingsSchema.extend({
openAiBaseUrl: z.string().optional(),
openAiApiKey: z.string().optional(),
openAiR1FormatEnabled: z.boolean().optional(),
+ openAiThinkingModeEnabled: z.boolean().optional(),
openAiModelId: z.string().optional(),
openAiCustomModelInfo: modelInfoSchema.nullish(),
openAiUseAzure: z.boolean().optional(),
diff --git a/src/api/providers/__tests__/openai.spec.ts b/src/api/providers/__tests__/openai.spec.ts
index 4469efd4d1..1cb282e8e8 100644
--- a/src/api/providers/__tests__/openai.spec.ts
+++ b/src/api/providers/__tests__/openai.spec.ts
@@ -497,6 +497,90 @@ describe("OpenAiHandler", () => {
const callArgs = mockCreate.mock.calls[0][0]
expect(callArgs.max_completion_tokens).toBe(4096)
})
+
+ describe("thinking mode", () => {
+ it("should include thinking parameter when openAiThinkingModeEnabled is true (streaming)", async () => {
+ const thinkingOptions: ApiHandlerOptions = {
+ ...mockOptions,
+ openAiThinkingModeEnabled: true,
+ }
+ const thinkingHandler = new OpenAiHandler(thinkingOptions)
+ const stream = thinkingHandler.createMessage(systemPrompt, messages)
+ // Consume the stream to trigger the API call
+ for await (const _chunk of stream) {
+ }
+ // Assert the mockCreate was called with thinking parameter
+ expect(mockCreate).toHaveBeenCalled()
+ const callArgs = mockCreate.mock.calls[0][0]
+ expect(callArgs.thinking).toEqual({ type: "enabled" })
+ })
+
+ it("should not include thinking parameter when openAiThinkingModeEnabled is false (streaming)", async () => {
+ const noThinkingOptions: ApiHandlerOptions = {
+ ...mockOptions,
+ openAiThinkingModeEnabled: false,
+ }
+ const noThinkingHandler = new OpenAiHandler(noThinkingOptions)
+ const stream = noThinkingHandler.createMessage(systemPrompt, messages)
+ // Consume the stream to trigger the API call
+ for await (const _chunk of stream) {
+ }
+ // Assert the mockCreate was called without thinking parameter
+ expect(mockCreate).toHaveBeenCalled()
+ const callArgs = mockCreate.mock.calls[0][0]
+ expect(callArgs.thinking).toBeUndefined()
+ })
+
+ it("should not include thinking parameter when openAiThinkingModeEnabled is undefined (streaming)", async () => {
+ const defaultOptions: ApiHandlerOptions = {
+ ...mockOptions,
+ // openAiThinkingModeEnabled is not set
+ }
+ const defaultHandler = new OpenAiHandler(defaultOptions)
+ const stream = defaultHandler.createMessage(systemPrompt, messages)
+ // Consume the stream to trigger the API call
+ for await (const _chunk of stream) {
+ }
+ // Assert the mockCreate was called without thinking parameter
+ expect(mockCreate).toHaveBeenCalled()
+ const callArgs = mockCreate.mock.calls[0][0]
+ expect(callArgs.thinking).toBeUndefined()
+ })
+
+ it("should include thinking parameter when openAiThinkingModeEnabled is true (non-streaming)", async () => {
+ const thinkingOptions: ApiHandlerOptions = {
+ ...mockOptions,
+ openAiThinkingModeEnabled: true,
+ openAiStreamingEnabled: false,
+ }
+ const thinkingHandler = new OpenAiHandler(thinkingOptions)
+ const stream = thinkingHandler.createMessage(systemPrompt, messages)
+ // Consume the stream to trigger the API call
+ for await (const _chunk of stream) {
+ }
+ // Assert the mockCreate was called with thinking parameter
+ expect(mockCreate).toHaveBeenCalled()
+ const callArgs = mockCreate.mock.calls[0][0]
+ expect(callArgs.thinking).toEqual({ type: "enabled" })
+ })
+
+ it("should not include thinking parameter when openAiThinkingModeEnabled is false (non-streaming)", async () => {
+ const noThinkingOptions: ApiHandlerOptions = {
+ ...mockOptions,
+ openAiThinkingModeEnabled: false,
+ openAiStreamingEnabled: false,
+ }
+ const noThinkingHandler = new OpenAiHandler(noThinkingOptions)
+ const stream = noThinkingHandler.createMessage(systemPrompt, messages)
+ // Consume the stream to trigger the API call
+ for await (const _chunk of stream) {
+ }
+ // Assert the mockCreate was called without thinking parameter
+ expect(mockCreate).toHaveBeenCalled()
+ const callArgs = mockCreate.mock.calls[0][0]
+ expect(callArgs.thinking).toBeUndefined()
+ })
+ })
})
describe("error handling", () => {
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 43a4fe7ae3..0f8bcd72a3 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -89,6 +89,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
const modelUrl = this.options.openAiBaseUrl ?? ""
const modelId = this.options.openAiModelId ?? ""
const enabledR1Format = this.options.openAiR1FormatEnabled ?? false
+ const enabledThinkingMode = this.options.openAiThinkingModeEnabled ?? false
const isAzureAiInference = this._isAzureAiInference(modelUrl)
const deepseekReasoner = modelId.includes("deepseek-reasoner") || enabledR1Format
@@ -153,13 +154,16 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl)
- const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+ const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming & {
+ thinking?: { type: "enabled"; budget_tokens?: number }
+ } = {
model: modelId,
temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
messages: convertedMessages,
stream: true as const,
...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
...(reasoning && reasoning),
+ ...(enabledThinkingMode && { thinking: { type: "enabled" } }),
...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
...(metadata?.toolProtocol === "native" && {
@@ -224,11 +228,14 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
yield this.processUsageMetrics(lastUsage, modelInfo)
}
} else {
- const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+ const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming & {
+ thinking?: { type: "enabled"; budget_tokens?: number }
+ } = {
model: modelId,
messages: deepseekReasoner
? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
: [systemMessage, ...convertToOpenAiMessages(messages)],
+ ...(enabledThinkingMode && { thinking: { type: "enabled" } }),
...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
...(metadata?.toolProtocol === "native" && {
diff --git a/webview-ui/src/components/settings/ThinkingModeSetting.tsx b/webview-ui/src/components/settings/ThinkingModeSetting.tsx
new file mode 100644
index 0000000000..379cb56415
--- /dev/null
+++ b/webview-ui/src/components/settings/ThinkingModeSetting.tsx
@@ -0,0 +1,25 @@
+import { Checkbox } from "vscrui"
+
+import { useAppTranslation } from "@/i18n/TranslationContext"
+
+interface ThinkingModeSettingProps {
+ onChange: (value: boolean) => void
+ openAiThinkingModeEnabled?: boolean
+}
+
+export const ThinkingModeSetting = ({ onChange, openAiThinkingModeEnabled }: ThinkingModeSettingProps) => {
+ const { t } = useAppTranslation()
+
+ return (
+		<>
+			<Checkbox
+				checked={openAiThinkingModeEnabled ?? false}
+				onChange={(checked: boolean) => onChange(checked)}>
+				{t("settings:modelInfo.enableThinkingMode")}
+			</Checkbox>
+			<div className="text-sm text-vscode-descriptionForeground">
+				{t("settings:modelInfo.enableThinkingModeTips")}
+			</div>
+		</>
+ )
+}
diff --git a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx
index 4eea6f09f1..9217e4a7f1 100644
--- a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx
+++ b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx
@@ -20,6 +20,7 @@ import { convertHeadersToObject } from "../utils/headers"
import { inputEventTransform, noTransform } from "../transforms"
import { ModelPicker } from "../ModelPicker"
import { R1FormatSetting } from "../R1FormatSetting"
+import { ThinkingModeSetting } from "../ThinkingModeSetting"
import { ThinkingBudget } from "../ThinkingBudget"
type OpenAICompatibleProps = {
@@ -153,6 +154,10 @@ export const OpenAICompatible = ({
onChange={handleInputChange("openAiR1FormatEnabled", noTransform)}
openAiR1FormatEnabled={apiConfiguration?.openAiR1FormatEnabled ?? false}
/>
+				<ThinkingModeSetting
+					onChange={handleInputChange("openAiThinkingModeEnabled", noTransform)}
+					openAiThinkingModeEnabled={apiConfiguration?.openAiThinkingModeEnabled ?? false}
+				/>
diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json
index 4f92caf7f7..30de64c85b 100644
--- a/webview-ui/src/i18n/locales/ca/settings.json
+++ b/webview-ui/src/i18n/locales/ca/settings.json
@@ -863,6 +863,8 @@
"enableStreaming": "Habilitar streaming",
"enableR1Format": "Activar els paràmetres del model R1",
"enableR1FormatTips": "S'ha d'activat quan s'utilitzen models R1 com el QWQ per evitar errors 400",
+ "enableThinkingMode": "Activar mode de pensament",
+ "enableThinkingModeTips": "Activar per a models de pensament com Kimi K2, DeepSeek Reasoner, GLM-4 per retornar contingut de raonament",
"useAzure": "Utilitzar Azure",
"azureApiVersion": "Establir versió de l'API d'Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json
index 30e482c820..0be2a8fc7d 100644
--- a/webview-ui/src/i18n/locales/de/settings.json
+++ b/webview-ui/src/i18n/locales/de/settings.json
@@ -863,6 +863,8 @@
"enableStreaming": "Streaming aktivieren",
"enableR1Format": "R1-Modellparameter aktivieren",
"enableR1FormatTips": "Muss bei Verwendung von R1-Modellen wie QWQ aktiviert werden, um 400er-Fehler zu vermeiden",
+ "enableThinkingMode": "Denkmodus aktivieren",
+ "enableThinkingModeTips": "Für Denkmodelle wie Kimi K2, DeepSeek Reasoner, GLM-4 aktivieren, um Schlussfolgerungsinhalte zurückzugeben",
"useAzure": "Azure verwenden",
"azureApiVersion": "Azure API-Version festlegen",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index 17d14da8bd..57feb39b4e 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -872,6 +872,8 @@
"enableStreaming": "Enable streaming",
"enableR1Format": "Enable R1 model parameters",
"enableR1FormatTips": "Must be enabled when using R1 models such as QWQ to prevent 400 errors",
+ "enableThinkingMode": "Enable thinking mode",
+ "enableThinkingModeTips": "Enable for thinking models like Kimi K2, DeepSeek Reasoner, GLM-4 to return reasoning content",
"useAzure": "Use Azure",
"azureApiVersion": "Set Azure API version",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json
index f514ba12fc..801f32a313 100644
--- a/webview-ui/src/i18n/locales/es/settings.json
+++ b/webview-ui/src/i18n/locales/es/settings.json
@@ -863,6 +863,8 @@
"enableStreaming": "Habilitar streaming",
"enableR1Format": "Habilitar parámetros del modelo R1",
"enableR1FormatTips": "Debe habilitarse al utilizar modelos R1 como QWQ, para evitar el error 400",
+ "enableThinkingMode": "Habilitar modo de pensamiento",
+ "enableThinkingModeTips": "Habilitar para modelos de pensamiento como Kimi K2, DeepSeek Reasoner, GLM-4 para devolver contenido de razonamiento",
"useAzure": "Usar Azure",
"azureApiVersion": "Establecer versión de API de Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json
index 415bfa5220..6c222aea81 100644
--- a/webview-ui/src/i18n/locales/fr/settings.json
+++ b/webview-ui/src/i18n/locales/fr/settings.json
@@ -863,6 +863,8 @@
"enableStreaming": "Activer le streaming",
"enableR1Format": "Activer les paramètres du modèle R1",
"enableR1FormatTips": "Doit être activé lors de l'utilisation de modèles R1 tels que QWQ, pour éviter l'erreur 400",
+ "enableThinkingMode": "Activer le mode réflexion",
+ "enableThinkingModeTips": "Activer pour les modèles de réflexion comme Kimi K2, DeepSeek Reasoner, GLM-4 pour retourner le contenu de raisonnement",
"useAzure": "Utiliser Azure",
"azureApiVersion": "Définir la version de l'API Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json
index 96a33fafee..4c960cfda1 100644
--- a/webview-ui/src/i18n/locales/hi/settings.json
+++ b/webview-ui/src/i18n/locales/hi/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "स्ट्रीमिंग सक्षम करें",
"enableR1Format": "R1 मॉडल पैरामीटर सक्षम करें",
"enableR1FormatTips": "QWQ जैसी R1 मॉडलों का उपयोग करते समय इसे सक्षम करना आवश्यक है, ताकि 400 त्रुटि से बचा जा सके",
+ "enableThinkingMode": "थिंकिंग मोड सक्षम करें",
+ "enableThinkingModeTips": "Kimi K2, DeepSeek Reasoner, GLM-4 जैसे थिंकिंग मॉडल के लिए सक्षम करें ताकि तर्क सामग्री वापस आ सके",
"useAzure": "Azure का उपयोग करें",
"azureApiVersion": "Azure API संस्करण सेट करें",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json
index a184c5c092..9bfb45d548 100644
--- a/webview-ui/src/i18n/locales/id/settings.json
+++ b/webview-ui/src/i18n/locales/id/settings.json
@@ -893,6 +893,8 @@
"enableStreaming": "Aktifkan streaming",
"enableR1Format": "Aktifkan parameter model R1",
"enableR1FormatTips": "Harus diaktifkan saat menggunakan model R1 seperti QWQ untuk mencegah error 400",
+ "enableThinkingMode": "Aktifkan mode berpikir",
+ "enableThinkingModeTips": "Aktifkan untuk model berpikir seperti Kimi K2, DeepSeek Reasoner, GLM-4 untuk mengembalikan konten penalaran",
"useAzure": "Gunakan Azure",
"azureApiVersion": "Atur versi API Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json
index 69fd4e40c1..f73d26ae97 100644
--- a/webview-ui/src/i18n/locales/it/settings.json
+++ b/webview-ui/src/i18n/locales/it/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "Abilita streaming",
"enableR1Format": "Abilita i parametri del modello R1",
"enableR1FormatTips": "Deve essere abilitato quando si utilizzano modelli R1 come QWQ, per evitare l'errore 400",
+ "enableThinkingMode": "Abilita modalità di pensiero",
+ "enableThinkingModeTips": "Abilita per modelli di pensiero come Kimi K2, DeepSeek Reasoner, GLM-4 per restituire contenuti di ragionamento",
"useAzure": "Usa Azure",
"azureApiVersion": "Imposta versione API Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json
index 66d1b6579e..b283d0def1 100644
--- a/webview-ui/src/i18n/locales/ja/settings.json
+++ b/webview-ui/src/i18n/locales/ja/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "ストリーミングを有効化",
"enableR1Format": "R1モデルパラメータを有効にする",
"enableR1FormatTips": "QWQなどのR1モデルを使用する際には、有効にする必要があります。400エラーを防ぐために",
+ "enableThinkingMode": "思考モードを有効にする",
+ "enableThinkingModeTips": "Kimi K2、DeepSeek Reasoner、GLM-4などの思考モデルで推論内容を返すために有効にします",
"useAzure": "Azureを使用",
"azureApiVersion": "Azure APIバージョンを設定",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json
index 691d756fed..514709cdfe 100644
--- a/webview-ui/src/i18n/locales/ko/settings.json
+++ b/webview-ui/src/i18n/locales/ko/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "스트리밍 활성화",
"enableR1Format": "R1 모델 매개변수 활성화",
"enableR1FormatTips": "QWQ와 같은 R1 모델을 사용할 때 활성화해야 하며, 400 오류를 방지합니다",
+ "enableThinkingMode": "사고 모드 활성화",
+ "enableThinkingModeTips": "Kimi K2, DeepSeek Reasoner, GLM-4와 같은 사고 모델에서 추론 내용을 반환하려면 활성화하세요",
"useAzure": "Azure 사용",
"azureApiVersion": "Azure API 버전 설정",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json
index d0c3a78216..75329d77c4 100644
--- a/webview-ui/src/i18n/locales/nl/settings.json
+++ b/webview-ui/src/i18n/locales/nl/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "Streaming inschakelen",
"enableR1Format": "R1-modelparameters inschakelen",
"enableR1FormatTips": "Moet ingeschakeld zijn bij gebruik van R1-modellen zoals QWQ om 400-fouten te voorkomen",
+ "enableThinkingMode": "Denkmodus inschakelen",
+ "enableThinkingModeTips": "Inschakelen voor denkmodellen zoals Kimi K2, DeepSeek Reasoner, GLM-4 om redeneerinhoud te retourneren",
"useAzure": "Azure gebruiken",
"azureApiVersion": "Azure API-versie instellen",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json
index 218cdf60e8..888418eaf6 100644
--- a/webview-ui/src/i18n/locales/pl/settings.json
+++ b/webview-ui/src/i18n/locales/pl/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "Włącz strumieniowanie",
"enableR1Format": "Włącz parametry modelu R1",
"enableR1FormatTips": "Należy włączyć podczas korzystania z modeli R1, takich jak QWQ, aby uniknąć błędu 400",
+ "enableThinkingMode": "Włącz tryb myślenia",
+ "enableThinkingModeTips": "Włącz dla modeli myślących, takich jak Kimi K2, DeepSeek Reasoner, GLM-4, aby zwrócić treść rozumowania",
"useAzure": "Użyj Azure",
"azureApiVersion": "Ustaw wersję API Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json
index e96dc4c345..0f3c8de060 100644
--- a/webview-ui/src/i18n/locales/pt-BR/settings.json
+++ b/webview-ui/src/i18n/locales/pt-BR/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "Ativar streaming",
"enableR1Format": "Ativar parâmetros do modelo R1",
"enableR1FormatTips": "Deve ser ativado ao usar modelos R1 como QWQ, para evitar erro 400",
+ "enableThinkingMode": "Ativar modo de pensamento",
+ "enableThinkingModeTips": "Ativar para modelos de pensamento como Kimi K2, DeepSeek Reasoner, GLM-4 para retornar conteúdo de raciocínio",
"useAzure": "Usar Azure",
"azureApiVersion": "Definir versão da API Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json
index aaae476d9c..c2b63bd356 100644
--- a/webview-ui/src/i18n/locales/ru/settings.json
+++ b/webview-ui/src/i18n/locales/ru/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "Включить потоковую передачу",
"enableR1Format": "Включить параметры модели R1",
"enableR1FormatTips": "Необходимо включить при использовании моделей R1 (например, QWQ), чтобы избежать ошибок 400",
+ "enableThinkingMode": "Включить режим мышления",
+ "enableThinkingModeTips": "Включите для моделей мышления, таких как Kimi K2, DeepSeek Reasoner, GLM-4, чтобы возвращать содержание рассуждений",
"useAzure": "Использовать Azure",
"azureApiVersion": "Установить версию API Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json
index 0eabf2782f..404816e280 100644
--- a/webview-ui/src/i18n/locales/tr/settings.json
+++ b/webview-ui/src/i18n/locales/tr/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "Akışı etkinleştir",
"enableR1Format": "R1 model parametrelerini etkinleştir",
"enableR1FormatTips": "QWQ gibi R1 modelleri kullanıldığında etkinleştirilmelidir, 400 hatası alınmaması için",
+ "enableThinkingMode": "Düşünme modunu etkinleştir",
+ "enableThinkingModeTips": "Kimi K2, DeepSeek Reasoner, GLM-4 gibi düşünme modelleri için akıl yürütme içeriği döndürmek üzere etkinleştirin",
"useAzure": "Azure kullan",
"azureApiVersion": "Azure API sürümünü ayarla",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json
index debb9fe860..5ec5e48163 100644
--- a/webview-ui/src/i18n/locales/vi/settings.json
+++ b/webview-ui/src/i18n/locales/vi/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "Bật streaming",
"enableR1Format": "Kích hoạt tham số mô hình R1",
"enableR1FormatTips": "Cần kích hoạt khi sử dụng các mô hình R1 như QWQ, để tránh lỗi 400",
+ "enableThinkingMode": "Bật chế độ suy nghĩ",
+ "enableThinkingModeTips": "Bật cho các mô hình suy nghĩ như Kimi K2, DeepSeek Reasoner, GLM-4 để trả về nội dung suy luận",
"useAzure": "Sử dụng Azure",
"azureApiVersion": "Đặt phiên bản API Azure",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json
index 6137b091e7..ed83cb7861 100644
--- a/webview-ui/src/i18n/locales/zh-CN/settings.json
+++ b/webview-ui/src/i18n/locales/zh-CN/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "启用流式传输",
"enableR1Format": "启用 R1 模型参数",
"enableR1FormatTips": "使用 QWQ 等 R1 系列模型时必须启用,避免出现 400 错误",
+ "enableThinkingMode": "启用思考模式",
+ "enableThinkingModeTips": "为 Kimi K2、DeepSeek Reasoner、GLM-4 等思考模型启用,以返回推理内容",
"useAzure": "使用 Azure 服务",
"azureApiVersion": "设置 Azure API 版本",
"gemini": {
diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json
index 84dfdb7527..1857cf2e41 100644
--- a/webview-ui/src/i18n/locales/zh-TW/settings.json
+++ b/webview-ui/src/i18n/locales/zh-TW/settings.json
@@ -864,6 +864,8 @@
"enableStreaming": "啟用串流輸出",
"enableR1Format": "啟用 R1 模型參數",
"enableR1FormatTips": "使用 QWQ 等 R1 模型時必須啟用,以避免發生 400 錯誤",
+ "enableThinkingMode": "啟用思考模式",
+ "enableThinkingModeTips": "為 Kimi K2、DeepSeek Reasoner、GLM-4 等思考模型啟用,以返回推理內容",
"useAzure": "使用 Azure",
"azureApiVersion": "設定 Azure API 版本",
"gemini": {