diff --git a/.changeset/breezy-badgers-refuse.md b/.changeset/breezy-badgers-refuse.md
new file mode 100644
index 0000000..50cbbe9
--- /dev/null
+++ b/.changeset/breezy-badgers-refuse.md
@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Visual cleanup to the list of modes on the prompts tab
diff --git a/.github/workflows/discord-pr-notify.yml b/.github/workflows/discord-pr-notify.yml
new file mode 100644
index 0000000..7c38b43
--- /dev/null
+++ b/.github/workflows/discord-pr-notify.yml
@@ -0,0 +1,21 @@
+name: Discord PR Notifier
+
+on:
+  workflow_dispatch:
+  pull_request_target:
+    types: [opened]
+
+jobs:
+  notify:
+    runs-on: ubuntu-latest
+    if: github.head_ref != 'changeset-release/main'
+    steps:
+      - name: Send Discord Notification
+        uses: Ilshidur/action-discord@master
+        with:
+          args: |
+            🚀 **New PR:** ${{ github.event.pull_request.title }}
+            🔗 <${{ github.event.pull_request.html_url }}>
+            👤 **Author:** ${{ github.event.pull_request.user.login }}
+        env:
+          DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
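The workflow above delegates the actual delivery to `Ilshidur/action-discord`. A rough sketch of the request that ultimately gets made — not the action's real implementation; the PR object shape mirrors the template variables used above, and the payload is the standard Discord webhook `content` field:

```typescript
// Hypothetical equivalent of the notification step, assuming DISCORD_WEBHOOK holds a Discord webhook URL.
async function notifyDiscord(pr: { title: string; html_url: string; user: { login: string } }): Promise<void> {
	const webhook = process.env.DISCORD_WEBHOOK
	if (!webhook) throw new Error("DISCORD_WEBHOOK is not set")

	const content = [
		`🚀 **New PR:** ${pr.title}`,
		`🔗 <${pr.html_url}>`,
		`👤 **Author:** ${pr.user.login}`,
	].join("\n")

	// Discord webhooks accept a JSON body with a `content` field.
	await fetch(webhook, {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({ content }),
	})
}
```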
"developer" : "user", content: systemPrompt }, - ...convertToOpenAiMessages(messages), - ], - }) + + if (modelId.startsWith("o1")) { + yield* this.handleO1FamilyMessage(modelId, systemPrompt, messages) + return + } + + if (modelId.startsWith("o3-mini")) { + yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages) + return + } + + yield* this.handleDefaultModelMessage(modelId, systemPrompt, messages) + } + + private async *handleO1FamilyMessage( + modelId: string, + systemPrompt: string, + messages: Anthropic.Messages.MessageParam[] + ): ApiStream { + // o1 supports developer prompt with formatting + // o1-preview and o1-mini only support user messages + const isOriginalO1 = modelId === "o1" + const response = await this.client.chat.completions.create({ + model: modelId, + messages: [ + { + role: isOriginalO1 ? "developer" : "user", + content: isOriginalO1 ? `Formatting re-enabled\n${systemPrompt}` : systemPrompt, + }, + ...convertToOpenAiMessages(messages), + ], + }) + + yield* this.yieldResponseData(response) + } + + private async *handleO3FamilyMessage( + modelId: string, + systemPrompt: string, + messages: Anthropic.Messages.MessageParam[] + ): ApiStream { + const stream = await this.client.chat.completions.create({ + model: "o3-mini", + messages: [ + { + role: "developer", + content: `Formatting re-enabled\n${systemPrompt}`, + }, + ...convertToOpenAiMessages(messages), + ], + stream: true, + stream_options: { include_usage: true }, + reasoning_effort: this.getModel().info.reasoningEffort, + }) + + yield* this.handleStreamResponse(stream) + } + + private async *handleDefaultModelMessage( + modelId: string, + systemPrompt: string, + messages: Anthropic.Messages.MessageParam[] + ): ApiStream { + const stream = await this.client.chat.completions.create({ + model: modelId, + temperature: 0, + messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)], + stream: true, + stream_options: { include_usage: true }, + }) + + yield* this.handleStreamResponse(stream) + } + + private async *yieldResponseData( + response: OpenAI.Chat.Completions.ChatCompletion + ): ApiStream { + yield { + type: "text", + text: response.choices[0]?.message.content || "", + } + yield { + type: "usage", + inputTokens: response.usage?.prompt_tokens || 0, + outputTokens: response.usage?.completion_tokens || 0, + } + } + + private async *handleStreamResponse( + stream: AsyncIterable + ): ApiStream { + for await (const chunk of stream) { + const delta = chunk.choices[0]?.delta + if (delta?.content) { yield { type: "text", - text: response.choices[0]?.message.content || "", + text: delta.content, } + } + + if (chunk.usage) { yield { type: "usage", - inputTokens: response.usage?.prompt_tokens || 0, - outputTokens: response.usage?.completion_tokens || 0, - } - break - } - case "o3-mini": - case "o3-mini-low": - case "o3-mini-high": { - const stream = await this.client.chat.completions.create({ - model: "o3-mini", - messages: [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)], - stream: true, - stream_options: { include_usage: true }, - reasoning_effort: this.getModel().info.reasoningEffort, - }) - - for await (const chunk of stream) { - const delta = chunk.choices[0]?.delta - if (delta?.content) { - yield { - type: "text", - text: delta.content, - } - } - - // contains a null value except for the last chunk which contains the token usage statistics for the entire request - if (chunk.usage) { - yield { - type: "usage", - inputTokens: 
-							inputTokens: chunk.usage.prompt_tokens || 0,
-							outputTokens: chunk.usage.completion_tokens || 0,
-						}
-					}
-				}
-				break
-			}
-			default: {
-				const stream = await this.client.chat.completions.create({
-					model: this.getModel().id,
-					// max_completion_tokens: this.getModel().info.maxTokens,
-					temperature: 0,
-					messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
-					stream: true,
-					stream_options: { include_usage: true },
-				})
-
-				for await (const chunk of stream) {
-					const delta = chunk.choices[0]?.delta
-					if (delta?.content) {
-						yield {
-							type: "text",
-							text: delta.content,
-						}
-					}
-
-					// contains a null value except for the last chunk which contains the token usage statistics for the entire request
-					if (chunk.usage) {
-						yield {
-							type: "usage",
-							inputTokens: chunk.usage.prompt_tokens || 0,
-							outputTokens: chunk.usage.completion_tokens || 0,
-						}
-					}
+				inputTokens: chunk.usage.prompt_tokens || 0,
+				outputTokens: chunk.usage.completion_tokens || 0,
 				}
 			}
 		}
@@ -125,32 +148,12 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 		const modelId = this.getModel().id
 		let requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
-		switch (modelId) {
-			case "o1":
-			case "o1-preview":
-			case "o1-mini":
-				// o1 doesn't support non-1 temp
-				requestOptions = {
-					model: modelId,
-					messages: [{ role: "user", content: prompt }],
-				}
-				break
-			case "o3-mini":
-			case "o3-mini-low":
-			case "o3-mini-high":
-				// o3 doesn't support non-1 temp
-				requestOptions = {
-					model: "o3-mini",
-					messages: [{ role: "user", content: prompt }],
-					reasoning_effort: this.getModel().info.reasoningEffort,
-				}
-				break
-			default:
-				requestOptions = {
-					model: modelId,
-					messages: [{ role: "user", content: prompt }],
-					temperature: 0,
-				}
+		if (modelId.startsWith("o1")) {
+			requestOptions = this.getO1CompletionOptions(modelId, prompt)
+		} else if (modelId.startsWith("o3-mini")) {
+			requestOptions = this.getO3CompletionOptions(modelId, prompt)
+		} else {
+			requestOptions = this.getDefaultCompletionOptions(modelId, prompt)
 		}
 
 		const response = await this.client.chat.completions.create(requestOptions)
@@ -162,4 +165,36 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 			throw error
 		}
 	}
+
+	private getO1CompletionOptions(
+		modelId: string,
+		prompt: string
+	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
+		return {
+			model: modelId,
+			messages: [{ role: "user", content: prompt }],
+		}
+	}
+
+	private getO3CompletionOptions(
+		modelId: string,
+		prompt: string
+	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
+		return {
+			model: "o3-mini",
+			messages: [{ role: "user", content: prompt }],
+			reasoning_effort: this.getModel().info.reasoningEffort,
+		}
+	}
+
+	private getDefaultCompletionOptions(
+		modelId: string,
+		prompt: string
+	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
+		return {
+			model: modelId,
+			messages: [{ role: "user", content: prompt }],
+			temperature: 0,
+		}
+	}
 }
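The refactor above replaces per-model `switch` cases with prefix routing, so `o1`, `o1-preview`, `o1-mini`, `o3-mini`, `o3-mini-low`, and `o3-mini-high` all reach the right handler without being enumerated. A minimal sketch of how a caller might consume the resulting `ApiStream`, assuming the chunk shapes yielded above; the import path and the `openAiNativeApiKey` option name are assumptions:

```typescript
import { Anthropic } from "@anthropic-ai/sdk"
import { OpenAiNativeHandler } from "./openai-native"

async function run(): Promise<void> {
	// "o3-mini-high" starts with "o3-mini", so it is routed to handleO3FamilyMessage.
	const handler = new OpenAiNativeHandler({
		apiModelId: "o3-mini-high",
		openAiNativeApiKey: process.env.OPENAI_API_KEY, // option name assumed
	})

	const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello!" }]
	let reply = ""
	let inputTokens = 0
	let outputTokens = 0

	// Accumulate text deltas and the final usage chunk from the stream.
	for await (const chunk of handler.createMessage("You are a helpful assistant.", messages)) {
		if (chunk.type === "text") {
			reply += chunk.text
		} else if (chunk.type === "usage") {
			inputTokens += chunk.inputTokens
			outputTokens += chunk.outputTokens
		}
	}

	console.log(reply, { inputTokens, outputTokens })
}
```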
diff --git a/src/core/diff/strategies/search-replace.ts b/src/core/diff/strategies/search-replace.ts
index 959f949..a9bf467 100644
--- a/src/core/diff/strategies/search-replace.ts
+++ b/src/core/diff/strategies/search-replace.ts
@@ -1,37 +1,9 @@
 import { DiffStrategy, DiffResult } from "../types"
 import { addLineNumbers, everyLineHasLineNumbers, stripLineNumbers } from "../../../integrations/misc/extract-text"
+import { distance } from "fastest-levenshtein"
 
 const BUFFER_LINES = 20 // Number of extra context lines to show before and after matches
 
-function levenshteinDistance(a: string, b: string): number {
-	const matrix: number[][] = []
-
-	// Initialize matrix
-	for (let i = 0; i <= a.length; i++) {
-		matrix[i] = [i]
-	}
-	for (let j = 0; j <= b.length; j++) {
-		matrix[0][j] = j
-	}
-
-	// Fill matrix
-	for (let i = 1; i <= a.length; i++) {
-		for (let j = 1; j <= b.length; j++) {
-			if (a[i - 1] === b[j - 1]) {
-				matrix[i][j] = matrix[i - 1][j - 1]
-			} else {
-				matrix[i][j] = Math.min(
-					matrix[i - 1][j - 1] + 1, // substitution
-					matrix[i][j - 1] + 1, // insertion
-					matrix[i - 1][j] + 1, // deletion
-				)
-			}
-		}
-	}
-
-	return matrix[a.length][b.length]
-}
-
 function getSimilarity(original: string, search: string): number {
 	if (search === "") {
 		return 1
@@ -47,12 +19,12 @@ function getSimilarity(original: string, search: string): number {
 		return 1
 	}
 
-	// Calculate Levenshtein distance
-	const distance = levenshteinDistance(normalizedOriginal, normalizedSearch)
+	// Calculate Levenshtein distance using fastest-levenshtein's distance function
+	const dist = distance(normalizedOriginal, normalizedSearch)
 
-	// Calculate similarity ratio (0 to 1, where 1 is exact match)
+	// Calculate similarity ratio (0 to 1, where 1 is an exact match)
 	const maxLength = Math.max(normalizedOriginal.length, normalizedSearch.length)
-	return 1 - distance / maxLength
+	return 1 - dist / maxLength
 }
 
 export class SearchReplaceDiffStrategy implements DiffStrategy {
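Swapping the hand-rolled matrix implementation for `fastest-levenshtein` leaves the similarity formula unchanged: `1 - distance / maxLength`. A standalone sketch of that ratio, without the surrounding normalization done in `getSimilarity`:

```typescript
import { distance } from "fastest-levenshtein"

// 1 means identical strings, 0 means nothing in common; mirrors getSimilarity's final ratio.
function similarity(a: string, b: string): number {
	if (a === b) return 1
	const maxLength = Math.max(a.length, b.length)
	return maxLength === 0 ? 1 : 1 - distance(a, b) / maxLength
}

console.log(distance("kitten", "sitting")) // 3 single-character edits
console.log(similarity("const foo = 1", "const foo = 2").toFixed(2)) // "0.92"
```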
diff --git a/webview-ui/src/components/prompts/PromptsView.tsx b/webview-ui/src/components/prompts/PromptsView.tsx
index 6827878..eff9f99 100644
--- a/webview-ui/src/components/prompts/PromptsView.tsx
+++ b/webview-ui/src/components/prompts/PromptsView.tsx
@@ -472,13 +472,11 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
 							{modes.map((modeConfig) => {
 								const isActive = mode === modeConfig.slug
@@ -859,13 +857,11 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
 							{Object.keys(supportPrompt.default).map((type) => (