diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts
index 6164595e689..4f97d3771ad 100644
--- a/src/core/webview/ClineProvider.ts
+++ b/src/core/webview/ClineProvider.ts
@@ -423,7 +423,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
`style-src ${webview.cspSource} 'unsafe-inline' https://* http://${localServerUrl} http://0.0.0.0:${localPort}`,
`img-src ${webview.cspSource} data:`,
`script-src 'unsafe-eval' https://* http://${localServerUrl} http://0.0.0.0:${localPort} 'nonce-${nonce}'`,
- `connect-src https://* ws://${localServerUrl} ws://0.0.0.0:${localPort} http://${localServerUrl} http://0.0.0.0:${localPort}`,
+ `connect-src https://* ws://${localServerUrl} ws://0.0.0.0:${localPort} http://${localServerUrl} http://0.0.0.0:${localPort} http://localhost:8000 http://0.0.0.0:8000 https://stingray-app-gb2an.ondigitalocean.app`,
]
return /*html*/ `
@@ -507,7 +507,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
-
+
Roo Code
diff --git a/src/shared/api.ts b/src/shared/api.ts
index c01179ace74..0b6433a03fa 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -799,6 +799,19 @@ export const PEARAI_URL = "https://stingray-app-gb2an.ondigitalocean.app/pearai-
// PearAI
export type PearAiModelId = keyof typeof pearAiModels
export const pearAiDefaultModelId: PearAiModelId = "pearai-model"
+export const pearAiDefaultModelInfo: ModelInfo = {
+ maxTokens: 8192,
+ contextWindow: 64000,
+ supportsImages: false,
+ supportsPromptCache: true,
+ inputPrice: 0.014,
+ outputPrice: 0.28,
+ cacheWritesPrice: 0.27,
+ cacheReadsPrice: 0.07,
+ description:
+ "DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.",
+}
+
export const pearAiModels = {
"pearai-model": {
maxTokens: 8192,
@@ -812,48 +825,4 @@ export const pearAiModels = {
description:
"DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.",
},
- "claude-3-5-sonnet-20241022": {
- maxTokens: 8192,
- contextWindow: 200000,
- supportsImages: true,
- supportsComputerUse: true,
- supportsPromptCache: true,
- inputPrice: 3.0,
- outputPrice: 15.0,
- cacheWritesPrice: 3.75,
- cacheReadsPrice: 0.3,
- },
- "claude-3-5-haiku-20241022": {
- maxTokens: 8192,
- contextWindow: 200000,
- supportsImages: false,
- supportsPromptCache: true,
- inputPrice: 1.0,
- outputPrice: 5.0,
- cacheWritesPrice: 1.25,
- cacheReadsPrice: 0.1,
- },
- "deepseek-chat": {
- maxTokens: 8192,
- contextWindow: 64000,
- supportsImages: false,
- supportsPromptCache: true,
- inputPrice: 0.014,
- outputPrice: 0.28,
- cacheWritesPrice: 0.27,
- cacheReadsPrice: 0.07,
- description:
- "DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.",
- },
- "deepseek-reasoner": {
- maxTokens: 8192,
- contextWindow: 64000,
- supportsImages: false,
- supportsPromptCache: true,
- inputPrice: 0.55,
- outputPrice: 2.19,
- cacheWritesPrice: 0.55,
- cacheReadsPrice: 0.14,
- description: "DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks.",
- },
} as const satisfies Record<string, ModelInfo>
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index ca5c7f95299..dd644bdf871 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -1,4 +1,4 @@
-import { Fragment, memo, useCallback, useEffect, useMemo, useState } from "react"
+import { Fragment, memo, useCallback, useEffect, useMemo, useState, useRef } from "react"
import { useEvent, useDebounce, useInterval } from "react-use"
import { Checkbox, Dropdown, Pane, type DropdownOption } from "vscrui"
import {
@@ -40,6 +40,8 @@ import {
requestyDefaultModelInfo,
pearAiModels,
pearAiDefaultModelId,
+ pearAiDefaultModelInfo,
+ PEARAI_URL,
} from "../../../../src/shared/api"
import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
@@ -52,17 +54,6 @@ import { validateApiConfiguration, validateModelId } from "@/utils/validate"
import { ApiErrorMessage } from "./ApiErrorMessage"
import { ThinkingBudget } from "./ThinkingBudget"
-const modelsByProvider: Record<string, Record<string, ModelInfo>> = {
- anthropic: anthropicModels,
- bedrock: bedrockModels,
- vertex: vertexModels,
- gemini: geminiModels,
- "openai-native": openAiNativeModels,
- deepseek: deepSeekModels,
- mistral: mistralModels,
- pearai: pearAiModels,
-}
-
interface ApiOptionsProps {
uriScheme: string | undefined
apiConfiguration: ApiConfiguration
@@ -101,6 +92,9 @@ const ApiOptions = ({
})
const [openAiModels, setOpenAiModels] = useState<Record<string, ModelInfo> | null>(null)
+ const [pearAiModels, setPearAiModels] = useState<Record<string, ModelInfo>>({
+ [pearAiDefaultModelId]: pearAiDefaultModelInfo,
+ })
const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
@@ -123,10 +117,16 @@ const ApiOptions = ({
[setApiConfigurationField],
)
- const { selectedProvider, selectedModelId, selectedModelInfo } = useMemo(
- () => normalizeApiConfiguration(apiConfiguration),
- [apiConfiguration],
- )
+ const { selectedProvider, selectedModelId, selectedModelInfo } = useMemo(() => {
+ const result = normalizeApiConfiguration(apiConfiguration)
+ if (result.selectedProvider === "pearai") {
+ return {
+ ...result,
+ selectedModelInfo: pearAiModels[result.selectedModelId] || pearAiModels[pearAiDefaultModelId],
+ }
+ }
+ return result
+ }, [apiConfiguration, pearAiModels])
// Debounced refresh model updates, only executed 250ms after the user
// stops typing.
@@ -167,6 +167,28 @@ const ApiOptions = ({
],
)
+ // Fetch PearAI models when provider is selected
+ useEffect(() => {
+ if (selectedProvider === "pearai") {
+ const fetchPearAiModels = async () => {
+ try {
+ const res = await fetch(`${PEARAI_URL}/getPearAIAgentModels`)
+ if (!res.ok) throw new Error("Failed to fetch models")
+ const config = await res.json()
+
+ if (config.models && Object.keys(config.models).length > 0) {
+ console.log("Models successfully loaded from server")
+ setPearAiModels(config.models)
+ }
+ } catch (error) {
+ console.error("Error fetching PearAI models:", error)
+ }
+ }
+
+ fetchPearAiModels()
+ }
+ }, [selectedProvider, setPearAiModels])
+
useEffect(() => {
const apiValidationResult =
validateApiConfiguration(apiConfiguration) ||
@@ -227,6 +249,28 @@ const ApiOptions = ({
useEvent("message", onMessage)
+ const modelsByProvider = useMemo(
+ () => ({
+ anthropic: anthropicModels,
+ bedrock: bedrockModels,
+ vertex: vertexModels,
+ gemini: geminiModels,
+ "openai-native": openAiNativeModels,
+ deepseek: deepSeekModels,
+ mistral: mistralModels,
+ pearai: pearAiModels,
+ glama: glamaModels,
+ openrouter: openRouterModels,
+ unbound: unboundModels,
+ requesty: requestyModels,
+ openai: openAiModels || {},
+ ollama: {},
+ lmstudio: {},
+ "vscode-lm": {},
+ }),
+ [pearAiModels, glamaModels, openRouterModels, unboundModels, requestyModels, openAiModels],
+ )
+
const selectedProviderModelOptions: DropdownOption[] = useMemo(
() =>
modelsByProvider[selectedProvider]
@@ -238,7 +282,7 @@ const ApiOptions = ({
})),
]
: [],
- [selectedProvider],
+ [selectedProvider, modelsByProvider],
)
return (