Merge branch 'RooVetGit:main' into main

Author: pacnpal
Date: 2025-02-02 11:33:51 -05:00
Committed by: GitHub
6 changed files with 200 additions and 147 deletions


@@ -0,0 +1,5 @@
---
"roo-cline": patch
---
Visual cleanup to the list of modes on the prompts tab

.github/workflows/discord-pr-notify.yml (new file, 21 lines)

@@ -0,0 +1,21 @@
name: Discord PR Notifier

on:
  workflow_dispatch:
  pull_request_target:
    types: [opened]

jobs:
  notify:
    runs-on: ubuntu-latest
    if: github.head_ref != 'changeset-release/main'
    steps:
      - name: Send Discord Notification
        uses: Ilshidur/action-discord@master
        with:
          args: |
            🚀 **New PR:** ${{ github.event.pull_request.title }}
            🔗 <${{ github.event.pull_request.html_url }}>
            👤 **Author:** ${{ github.event.pull_request.user.login }}
        env:
          DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
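
For reference, a rough TypeScript sketch (not part of this PR) of what the notification step amounts to: the Ilshidur/action-discord step reads DISCORD_WEBHOOK from the environment and posts the args text to that webhook. Only the { content } payload shape comes from Discord's webhook API; the helper name, the pr shape, and the error handling are illustrative assumptions.

// Illustrative only; assumes Node 18+ for the global fetch API.
interface PullRequestInfo {
	title: string
	html_url: string
	user: { login: string }
}

async function notifyDiscord(webhookUrl: string, pr: PullRequestInfo): Promise<void> {
	const content = [
		`🚀 **New PR:** ${pr.title}`,
		`🔗 <${pr.html_url}>`,
		`👤 **Author:** ${pr.user.login}`,
	].join("\n")

	// Discord webhooks accept a JSON body with a `content` field.
	const res = await fetch(webhookUrl, {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({ content }),
	})
	if (!res.ok) {
		throw new Error(`Discord webhook returned ${res.status}`)
	}
}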


@@ -153,11 +153,35 @@ describe("OpenAiNativeHandler", () => {
 			expect(mockCreate).toHaveBeenCalledWith({
 				model: "o1",
 				messages: [
-					{ role: "developer", content: systemPrompt },
+					{ role: "developer", content: "Formatting re-enabled\n" + systemPrompt },
 					{ role: "user", content: "Hello!" },
 				],
 			})
 		})
+
+		it("should handle o3-mini model family correctly", async () => {
+			handler = new OpenAiNativeHandler({
+				...mockOptions,
+				apiModelId: "o3-mini",
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			expect(mockCreate).toHaveBeenCalledWith({
+				model: "o3-mini",
+				messages: [
+					{ role: "developer", content: "Formatting re-enabled\n" + systemPrompt },
+					{ role: "user", content: "Hello!" },
+				],
+				stream: true,
+				stream_options: { include_usage: true },
+				reasoning_effort: "medium",
+			})
+		})
 	})

 	describe("streaming models", () => {


@@ -24,88 +24,111 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
 		const modelId = this.getModel().id
-		switch (modelId) {
-			case "o1":
-			case "o1-preview":
-			case "o1-mini": {
-				// o1-preview and o1-mini don't support streaming, non-1 temp, or system prompt
-				// o1 doesnt support streaming or non-1 temp but does support a developer prompt
-				const response = await this.client.chat.completions.create({
-					model: modelId,
-					messages: [
-						{ role: modelId === "o1" ? "developer" : "user", content: systemPrompt },
-						...convertToOpenAiMessages(messages),
-					],
-				})
-				yield {
-					type: "text",
-					text: response.choices[0]?.message.content || "",
-				}
-				yield {
-					type: "usage",
-					inputTokens: response.usage?.prompt_tokens || 0,
-					outputTokens: response.usage?.completion_tokens || 0,
-				}
-				break
-			}
-			case "o3-mini":
-			case "o3-mini-low":
-			case "o3-mini-high": {
-				const stream = await this.client.chat.completions.create({
-					model: "o3-mini",
-					messages: [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
-					stream: true,
-					stream_options: { include_usage: true },
-					reasoning_effort: this.getModel().info.reasoningEffort,
-				})
-				for await (const chunk of stream) {
-					const delta = chunk.choices[0]?.delta
-					if (delta?.content) {
-						yield {
-							type: "text",
-							text: delta.content,
-						}
-					}
-					// contains a null value except for the last chunk which contains the token usage statistics for the entire request
-					if (chunk.usage) {
-						yield {
-							type: "usage",
-							inputTokens: chunk.usage.prompt_tokens || 0,
-							outputTokens: chunk.usage.completion_tokens || 0,
-						}
-					}
-				}
-				break
-			}
-			default: {
-				const stream = await this.client.chat.completions.create({
-					model: this.getModel().id,
-					// max_completion_tokens: this.getModel().info.maxTokens,
-					temperature: 0,
-					messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
-					stream: true,
-					stream_options: { include_usage: true },
-				})
-				for await (const chunk of stream) {
-					const delta = chunk.choices[0]?.delta
-					if (delta?.content) {
-						yield {
-							type: "text",
-							text: delta.content,
-						}
-					}
-					// contains a null value except for the last chunk which contains the token usage statistics for the entire request
-					if (chunk.usage) {
-						yield {
-							type: "usage",
-							inputTokens: chunk.usage.prompt_tokens || 0,
-							outputTokens: chunk.usage.completion_tokens || 0,
-						}
-					}
-				}
-			}
-		}
-	}
+
+		if (modelId.startsWith("o1")) {
+			yield* this.handleO1FamilyMessage(modelId, systemPrompt, messages)
+			return
+		}
+
+		if (modelId.startsWith("o3-mini")) {
+			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
+			return
+		}
+
+		yield* this.handleDefaultModelMessage(modelId, systemPrompt, messages)
+	}
+
+	private async *handleO1FamilyMessage(
+		modelId: string,
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[]
+	): ApiStream {
+		// o1 supports developer prompt with formatting
+		// o1-preview and o1-mini only support user messages
+		const isOriginalO1 = modelId === "o1"
+		const response = await this.client.chat.completions.create({
+			model: modelId,
+			messages: [
+				{
+					role: isOriginalO1 ? "developer" : "user",
+					content: isOriginalO1 ? `Formatting re-enabled\n${systemPrompt}` : systemPrompt,
+				},
+				...convertToOpenAiMessages(messages),
+			],
+		})
+
+		yield* this.yieldResponseData(response)
+	}
+
+	private async *handleO3FamilyMessage(
+		modelId: string,
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[]
+	): ApiStream {
+		const stream = await this.client.chat.completions.create({
+			model: "o3-mini",
+			messages: [
+				{
+					role: "developer",
+					content: `Formatting re-enabled\n${systemPrompt}`,
+				},
+				...convertToOpenAiMessages(messages),
+			],
+			stream: true,
+			stream_options: { include_usage: true },
+			reasoning_effort: this.getModel().info.reasoningEffort,
+		})
+
+		yield* this.handleStreamResponse(stream)
+	}
+
+	private async *handleDefaultModelMessage(
+		modelId: string,
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[]
+	): ApiStream {
+		const stream = await this.client.chat.completions.create({
+			model: modelId,
+			temperature: 0,
+			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+			stream: true,
+			stream_options: { include_usage: true },
+		})
+
+		yield* this.handleStreamResponse(stream)
+	}
+
+	private async *yieldResponseData(
+		response: OpenAI.Chat.Completions.ChatCompletion
+	): ApiStream {
+		yield {
+			type: "text",
+			text: response.choices[0]?.message.content || "",
+		}
+		yield {
+			type: "usage",
+			inputTokens: response.usage?.prompt_tokens || 0,
+			outputTokens: response.usage?.completion_tokens || 0,
+		}
+	}
+
+	private async *handleStreamResponse(
+		stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>
+	): ApiStream {
+		for await (const chunk of stream) {
+			const delta = chunk.choices[0]?.delta
+			if (delta?.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+			// usage is null except for the last chunk, which carries token statistics for the entire request
+			if (chunk.usage) {
+				yield {
+					type: "usage",
+					inputTokens: chunk.usage.prompt_tokens || 0,
+					outputTokens: chunk.usage.completion_tokens || 0,
+				}
+			}
+		}
+	}
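
Both the non-streaming o1 path (via yieldResponseData) and the streaming paths (via handleStreamResponse) yield the same two chunk shapes, so callers can treat every model family uniformly. A minimal consumption sketch, assuming only the chunk shapes visible in the diff above; the collectResponse helper and its loose typing are illustrative, not repo code.

type SketchChunk =
	| { type: "text"; text: string }
	| { type: "usage"; inputTokens: number; outputTokens: number }

// Illustrative consumer: accumulates streamed text and token usage from createMessage().
async function collectResponse(
	stream: AsyncIterable<SketchChunk>,
): Promise<{ text: string; inputTokens: number; outputTokens: number }> {
	let text = ""
	let inputTokens = 0
	let outputTokens = 0
	for await (const chunk of stream) {
		if (chunk.type === "text") {
			text += chunk.text
		} else {
			inputTokens += chunk.inputTokens
			outputTokens += chunk.outputTokens
		}
	}
	return { text, inputTokens, outputTokens }
}

// Hypothetical usage: const result = await collectResponse(handler.createMessage(systemPrompt, messages))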
@@ -125,32 +148,12 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 		const modelId = this.getModel().id
 		let requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
-		switch (modelId) {
-			case "o1":
-			case "o1-preview":
-			case "o1-mini":
-				// o1 doesn't support non-1 temp
-				requestOptions = {
-					model: modelId,
-					messages: [{ role: "user", content: prompt }],
-				}
-				break
-			case "o3-mini":
-			case "o3-mini-low":
-			case "o3-mini-high":
-				// o3 doesn't support non-1 temp
-				requestOptions = {
-					model: "o3-mini",
-					messages: [{ role: "user", content: prompt }],
-					reasoning_effort: this.getModel().info.reasoningEffort,
-				}
-				break
-			default:
-				requestOptions = {
-					model: modelId,
-					messages: [{ role: "user", content: prompt }],
-					temperature: 0,
-				}
-		}
+
+		if (modelId.startsWith("o1")) {
+			requestOptions = this.getO1CompletionOptions(modelId, prompt)
+		} else if (modelId.startsWith("o3-mini")) {
+			requestOptions = this.getO3CompletionOptions(modelId, prompt)
+		} else {
+			requestOptions = this.getDefaultCompletionOptions(modelId, prompt)
+		}

 		const response = await this.client.chat.completions.create(requestOptions)
@@ -162,4 +165,36 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 			throw error
 		}
 	}
+
+	private getO1CompletionOptions(
+		modelId: string,
+		prompt: string
+	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
+		return {
+			model: modelId,
+			messages: [{ role: "user", content: prompt }],
+		}
+	}
+
+	private getO3CompletionOptions(
+		modelId: string,
+		prompt: string
+	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
+		return {
+			model: "o3-mini",
+			messages: [{ role: "user", content: prompt }],
+			reasoning_effort: this.getModel().info.reasoningEffort,
+		}
+	}
+
+	private getDefaultCompletionOptions(
+		modelId: string,
+		prompt: string
+	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
+		return {
+			model: modelId,
+			messages: [{ role: "user", content: prompt }],
+			temperature: 0,
+		}
+	}
 }
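
One detail carried over from the removed switch: ids like "o3-mini-low" and "o3-mini-high" (the old case labels) still hit the API under the base name "o3-mini", with the variant expressed through reasoning_effort from the model info. A hedged sketch of that mapping follows; the low/high values are assumptions inferred from the suffixes, while "medium" for plain "o3-mini" matches the expectation in the updated test.

// Assumed values for illustration; in the repo this comes from this.getModel().info.reasoningEffort.
const assumedReasoningEffort: Record<string, "low" | "medium" | "high"> = {
	"o3-mini": "medium", // the updated test expects reasoning_effort: "medium" for "o3-mini"
	"o3-mini-low": "low", // inferred from the removed case label, not confirmed
	"o3-mini-high": "high", // inferred from the removed case label, not confirmed
}

// Every o3-mini variant is sent to the API under the base model name.
function resolveO3Request(modelId: string, prompt: string) {
	return {
		model: "o3-mini",
		messages: [{ role: "user" as const, content: prompt }],
		reasoning_effort: assumedReasoningEffort[modelId],
	}
}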


@@ -1,37 +1,9 @@
 import { DiffStrategy, DiffResult } from "../types"
 import { addLineNumbers, everyLineHasLineNumbers, stripLineNumbers } from "../../../integrations/misc/extract-text"
+import { distance } from "fastest-levenshtein"

 const BUFFER_LINES = 20 // Number of extra context lines to show before and after matches

-function levenshteinDistance(a: string, b: string): number {
-	const matrix: number[][] = []
-
-	// Initialize matrix
-	for (let i = 0; i <= a.length; i++) {
-		matrix[i] = [i]
-	}
-	for (let j = 0; j <= b.length; j++) {
-		matrix[0][j] = j
-	}
-
-	// Fill matrix
-	for (let i = 1; i <= a.length; i++) {
-		for (let j = 1; j <= b.length; j++) {
-			if (a[i - 1] === b[j - 1]) {
-				matrix[i][j] = matrix[i - 1][j - 1]
-			} else {
-				matrix[i][j] = Math.min(
-					matrix[i - 1][j - 1] + 1, // substitution
-					matrix[i][j - 1] + 1, // insertion
-					matrix[i - 1][j] + 1, // deletion
-				)
-			}
-		}
-	}
-
-	return matrix[a.length][b.length]
-}
-
 function getSimilarity(original: string, search: string): number {
 	if (search === "") {
 		return 1
@@ -47,12 +19,12 @@ function getSimilarity(original: string, search: string): number {
 		return 1
 	}

-	// Calculate Levenshtein distance
-	const distance = levenshteinDistance(normalizedOriginal, normalizedSearch)
+	// Calculate Levenshtein distance using fastest-levenshtein's distance function
+	const dist = distance(normalizedOriginal, normalizedSearch)

-	// Calculate similarity ratio (0 to 1, where 1 is exact match)
+	// Calculate similarity ratio (0 to 1, where 1 is an exact match)
 	const maxLength = Math.max(normalizedOriginal.length, normalizedSearch.length)
-	return 1 - distance / maxLength
+	return 1 - dist / maxLength
 }

 export class SearchReplaceDiffStrategy implements DiffStrategy {
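
The hand-rolled matrix implementation above is replaced by the distance export from the fastest-levenshtein package; the similarity math itself is unchanged. A small standalone sketch of that calculation (the similarity wrapper and sample strings are illustrative, and the whitespace normalization done by the real getSimilarity is omitted here):

import { distance } from "fastest-levenshtein"

// Mirrors the ratio used by getSimilarity: 1 means identical, 0 means nothing in common.
function similarity(original: string, search: string): number {
	if (search === "") return 1
	const dist = distance(original, search)
	const maxLength = Math.max(original.length, search.length)
	return 1 - dist / maxLength
}

console.log(similarity("const foo = 1", "const foo = 1")) // 1 (exact match)
console.log(similarity("const foo = 1", "const bar = 1")) // ≈0.77 (3 edits over 13 characters)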


@@ -472,13 +472,11 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
 					<div
 						style={{
 							display: "flex",
-							gap: "16px",
+							gap: "8px",
 							alignItems: "center",
 							marginBottom: "12px",
-							overflowX: "auto",
-							flexWrap: "nowrap",
-							paddingBottom: "4px",
-							paddingRight: "20px",
+							flexWrap: "wrap",
+							padding: "4px 0",
 						}}>
 						{modes.map((modeConfig) => {
 							const isActive = mode === modeConfig.slug
@@ -859,13 +857,11 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
 					<div
 						style={{
 							display: "flex",
-							gap: "16px",
+							gap: "8px",
 							alignItems: "center",
 							marginBottom: "12px",
-							overflowX: "auto",
-							flexWrap: "nowrap",
-							paddingBottom: "4px",
-							paddingRight: "20px",
+							flexWrap: "wrap",
+							padding: "4px 0",
 						}}>
 						{Object.keys(supportPrompt.default).map((type) => (
 							<button