From d047c8beae31525e86db373cbd7fe536a7a66481 Mon Sep 17 00:00:00 2001
From: Matt Rubens
Date: Sat, 1 Feb 2025 00:47:46 -0500
Subject: [PATCH] Add o3-mini-high and o3-mini-low

---
 .changeset/kind-balloons-grin.md              |  5 +++++
 .../providers/__tests__/openai-native.test.ts |  2 +-
 src/api/providers/openai-native.ts            | 17 ++++++++++++++--
 src/shared/api.ts                             | 20 +++++++++++++++++++
 4 files changed, 41 insertions(+), 3 deletions(-)
 create mode 100644 .changeset/kind-balloons-grin.md

diff --git a/.changeset/kind-balloons-grin.md b/.changeset/kind-balloons-grin.md
new file mode 100644
index 0000000..889beae
--- /dev/null
+++ b/.changeset/kind-balloons-grin.md
@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Add o3-mini-high and o3-mini-low

diff --git a/src/api/providers/__tests__/openai-native.test.ts b/src/api/providers/__tests__/openai-native.test.ts
index 4488bd3..356f18c 100644
--- a/src/api/providers/__tests__/openai-native.test.ts
+++ b/src/api/providers/__tests__/openai-native.test.ts
@@ -300,7 +300,7 @@ describe("OpenAiNativeHandler", () => {
 			expect(mockCreate).toHaveBeenCalledWith({
 				model: "o3-mini",
 				messages: [{ role: "user", content: "Test prompt" }],
-				temperature: 0,
+				reasoning_effort: "medium",
 			})
 		})
 
diff --git a/src/api/providers/openai-native.ts b/src/api/providers/openai-native.ts
index 4c90496..b2f5237 100644
--- a/src/api/providers/openai-native.ts
+++ b/src/api/providers/openai-native.ts
@@ -48,12 +48,15 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 				}
 				break
 			}
-			case "o3-mini": {
+			case "o3-mini":
+			case "o3-mini-low":
+			case "o3-mini-high": {
 				const stream = await this.client.chat.completions.create({
-					model: this.getModel().id,
+					model: "o3-mini",
 					messages: [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 					stream: true,
 					stream_options: { include_usage: true },
+					reasoning_effort: this.getModel().info.reasoningEffort,
 				})
 
 				for await (const chunk of stream) {
@@ -132,6 +135,16 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 					messages: [{ role: "user", content: prompt }],
 				}
 				break
+			case "o3-mini":
+			case "o3-mini-low":
+			case "o3-mini-high":
+				// o3 doesn't support non-1 temp
+				requestOptions = {
+					model: "o3-mini",
+					messages: [{ role: "user", content: prompt }],
+					reasoning_effort: this.getModel().info.reasoningEffort,
+				}
+				break
 			default:
 				requestOptions = {
 					model: modelId,

diff --git a/src/shared/api.ts b/src/shared/api.ts
index 7da67fd..6e655de 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -80,6 +80,7 @@ export interface ModelInfo {
 	cacheWritesPrice?: number
 	cacheReadsPrice?: number
 	description?: string
+	reasoningEffort?: "low" | "medium" | "high"
 }
 
 // Anthropic
@@ -517,6 +518,25 @@ export const openAiNativeModels = {
 		supportsPromptCache: false,
 		inputPrice: 1.1,
 		outputPrice: 4.4,
+		reasoningEffort: "medium",
+	},
+	"o3-mini-high": {
+		maxTokens: 100_000,
+		contextWindow: 200_000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 1.1,
+		outputPrice: 4.4,
+		reasoningEffort: "high",
+	},
+	"o3-mini-low": {
+		maxTokens: 100_000,
+		contextWindow: 200_000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 1.1,
+		outputPrice: 4.4,
+		reasoningEffort: "low",
 	},
 	o1: {
 		maxTokens: 100_000,
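
---

Reviewer note (not part of the patch): a minimal standalone sketch of the behavior this change introduces, assuming the openai Node SDK's reasoning_effort parameter as used in the diff above. All three model IDs resolve to the same underlying "o3-mini" API model and differ only in the reasoning effort sent with the request. The effortByModelId map and completeWithO3Mini helper are hypothetical names for illustration; the real code looks this up via getModel().info.reasoningEffort.

	// TypeScript sketch (hypothetical names); mirrors the patch's behavior
	import OpenAI from "openai"

	type ReasoningEffort = "low" | "medium" | "high"

	// Mirrors the reasoningEffort values added to openAiNativeModels above.
	const effortByModelId: Record<string, ReasoningEffort> = {
		"o3-mini": "medium",
		"o3-mini-low": "low",
		"o3-mini-high": "high",
	}

	async function completeWithO3Mini(client: OpenAI, modelId: string, prompt: string) {
		// temperature is deliberately omitted: o3-mini rejects non-default values,
		// so reasoning_effort is the knob that distinguishes the three model IDs.
		return client.chat.completions.create({
			model: "o3-mini", // the actual API model name for all three IDs
			messages: [{ role: "user", content: prompt }],
			reasoning_effort: effortByModelId[modelId] ?? "medium",
		})
	}

One design note: because the -low and -high variants are metadata-only aliases (same pricing, context window, and token limits in shared/api.ts), adding further effort levels requires no new provider logic, only a new entry in openAiNativeModels.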