
Commit 1c1b5ae

fix param to follow openai new convention

1 parent 896d5e7 · commit 1c1b5ae

7 files changed: 8 additions & 8 deletions

apps/sim/executor/handlers/agent/agent-handler.ts
2 additions & 2 deletions

@@ -936,8 +936,8 @@ export class AgentBlockHandler implements BlockHandler {
       systemPrompt: validMessages ? undefined : inputs.systemPrompt,
       context: validMessages ? undefined : stringifyJSON(messages),
       tools: formattedTools,
-      temperature: inputs.temperature,
-      maxTokens: inputs.maxTokens,
+      temperature: inputs.temperature != null ? Number(inputs.temperature) : undefined,
+      maxTokens: inputs.maxTokens != null ? Number(inputs.maxTokens) : undefined,
       apiKey: inputs.apiKey,
       azureEndpoint: inputs.azureEndpoint,
       azureApiVersion: inputs.azureApiVersion,
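
The handler change coerces both values to numbers before they reach a provider, presumably because block inputs can arrive as strings (e.g. from serialized workflow configuration), and the loose `!= null` check treats both `null` and `undefined` as "not set". A minimal sketch of the pattern; `RawInputs` and `buildRequest` are hypothetical names for illustration, not part of the codebase:

interface RawInputs {
  temperature?: number | string | null
  maxTokens?: number | string | null
}

function buildRequest(inputs: RawInputs) {
  return {
    // `!= null` (loose) is true for both null and undefined, so absent
    // values stay undefined instead of becoming NaN via Number(undefined).
    temperature: inputs.temperature != null ? Number(inputs.temperature) : undefined,
    maxTokens: inputs.maxTokens != null ? Number(inputs.maxTokens) : undefined,
  }
}

// String values (e.g. from a form field) are coerced to numbers:
console.log(buildRequest({ temperature: '0.7', maxTokens: '1024' }))
// -> { temperature: 0.7, maxTokens: 1024 }
console.log(buildRequest({}))
// -> { temperature: undefined, maxTokens: undefined }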

apps/sim/providers/azure-openai/index.ts
1 addition & 1 deletion

@@ -102,7 +102,7 @@ export const azureOpenAIProvider: ProviderConfig = {
     }

     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

     if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
     if (request.verbosity !== undefined) payload.verbosity = request.verbosity
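
This and the remaining diffs rename the payload field from `max_tokens` to `max_completion_tokens`, following OpenAI's newer Chat Completions convention: `max_tokens` is deprecated there, and reasoning models such as o1 accept only the new name. The same rename is applied uniformly to the OpenAI-compatible providers below. A minimal sketch of the guard, assuming simplified `ProviderRequest` and `buildPayload` shapes for illustration:

interface ProviderRequest {
  model: string
  temperature?: number
  maxTokens?: number
}

function buildPayload(request: ProviderRequest): Record<string, unknown> {
  const payload: Record<string, unknown> = { model: request.model }

  if (request.temperature !== undefined) payload.temperature = request.temperature
  // OpenAI deprecated `max_tokens` on the Chat Completions API in favor of
  // `max_completion_tokens`; newer reasoning models reject the old name.
  if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

  return payload
}

console.log(buildPayload({ model: 'gpt-4o', maxTokens: 512 }))
// -> { model: 'gpt-4o', max_completion_tokens: 512 }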

apps/sim/providers/cerebras/index.ts
1 addition & 1 deletion

@@ -77,7 +77,7 @@ export const cerebrasProvider: ProviderConfig = {
       messages: allMessages,
     }
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
     if (request.responseFormat) {
       payload.response_format = {
         type: 'json_schema',

apps/sim/providers/groq/index.ts
1 addition & 1 deletion

@@ -74,7 +74,7 @@ export const groqProvider: ProviderConfig = {
     }

     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

     if (request.responseFormat) {
       payload.response_format = {

apps/sim/providers/openai/index.ts
1 addition & 1 deletion

@@ -81,7 +81,7 @@ export const openaiProvider: ProviderConfig = {
     }

     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

     if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
     if (request.verbosity !== undefined) payload.verbosity = request.verbosity

apps/sim/providers/vllm/index.ts
1 addition & 1 deletion

@@ -135,7 +135,7 @@ export const vllmProvider: ProviderConfig = {
     }

     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

     if (request.responseFormat) {
       payload.response_format = {

apps/sim/providers/xai/index.ts
1 addition & 1 deletion

@@ -92,7 +92,7 @@ export const xAIProvider: ProviderConfig = {
     }

     if (request.temperature !== undefined) basePayload.temperature = request.temperature
-    if (request.maxTokens != null) basePayload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) basePayload.max_completion_tokens = request.maxTokens
     let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null

     if (tools?.length) {
