mirror of https://github.com/pacnpal/Roo-Code.git (synced 2025-12-19 20:01:08 -05:00)
Merge branch 'RooVetGit:main' into main
.changeset/breezy-badgers-refuse.md  (Normal file, 5 lines)
@@ -0,0 +1,5 @@
---
"roo-cline": patch
---

Visual cleanup to the list of modes on the prompts tab
.github/workflows/discord-pr-notify.yml  (vendored, Normal file, 21 lines)
@@ -0,0 +1,21 @@
name: Discord PR Notifier

on:
  workflow_dispatch:
  pull_request_target:
    types: [opened]

jobs:
  notify:
    runs-on: ubuntu-latest
    if: github.head_ref != 'changeset-release/main'
    steps:
      - name: Send Discord Notification
        uses: Ilshidur/action-discord@master
        with:
          args: |
            🚀 **New PR:** ${{ github.event.pull_request.title }}
            🔗 <${{ github.event.pull_request.html_url }}>
            👤 **Author:** ${{ github.event.pull_request.user.login }}
        env:
          DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
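For context, Ilshidur/action-discord posts the rendered args text to the webhook held in DISCORD_WEBHOOK. A rough TypeScript sketch of the equivalent call (the action's internals are assumed here; the endpoint shape is standard Discord webhooks, which accept a JSON body with a "content" field):

	// Sketch only: what the workflow's notification step boils down to.
	async function notifyDiscord(webhookUrl: string, pr: { title: string; url: string; author: string }) {
		await fetch(webhookUrl, {
			method: "POST",
			headers: { "Content-Type": "application/json" },
			body: JSON.stringify({
				content: `🚀 **New PR:** ${pr.title}\n🔗 <${pr.url}>\n👤 **Author:** ${pr.author}`,
			}),
		})
	}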
@@ -153,11 +153,35 @@ describe("OpenAiNativeHandler", () => {
			expect(mockCreate).toHaveBeenCalledWith({
				model: "o1",
				messages: [
					{ role: "developer", content: systemPrompt },
					{ role: "developer", content: "Formatting re-enabled\n" + systemPrompt },
					{ role: "user", content: "Hello!" },
				],
			})
		})

		it("should handle o3-mini model family correctly", async () => {
			handler = new OpenAiNativeHandler({
				...mockOptions,
				apiModelId: "o3-mini",
			})

			const stream = handler.createMessage(systemPrompt, messages)
			const chunks: any[] = []
			for await (const chunk of stream) {
				chunks.push(chunk)
			}

			expect(mockCreate).toHaveBeenCalledWith({
				model: "o3-mini",
				messages: [
					{ role: "developer", content: "Formatting re-enabled\n" + systemPrompt },
					{ role: "user", content: "Hello!" },
				],
				stream: true,
				stream_options: { include_usage: true },
				reasoning_effort: "medium",
			})
		})
	})

	describe("streaming models", () => {
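These assertions lean on a mocked OpenAI client whose setup sits above this hunk. A minimal sketch of how mockCreate is typically wired up with Jest (names assumed, not shown in the diff):

	// Assumed setup, for illustration: replace the OpenAI constructor so that
	// chat.completions.create is a jest.fn() the assertions can inspect.
	const mockCreate = jest.fn()
	jest.mock("openai", () => ({
		__esModule: true,
		default: jest.fn().mockImplementation(() => ({
			chat: { completions: { create: mockCreate } },
		})),
	}))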
@@ -24,19 +24,83 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler

	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
		const modelId = this.getModel().id
		switch (modelId) {
			case "o1":
			case "o1-preview":
			case "o1-mini": {
				// o1-preview and o1-mini don't support streaming, non-1 temp, or a system prompt
				// o1 doesn't support streaming or non-1 temp but does support a developer prompt

		if (modelId.startsWith("o1")) {
			yield* this.handleO1FamilyMessage(modelId, systemPrompt, messages)
			return
		}

		if (modelId.startsWith("o3-mini")) {
			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
			return
		}

		yield* this.handleDefaultModelMessage(modelId, systemPrompt, messages)
	}

	private async *handleO1FamilyMessage(
		modelId: string,
		systemPrompt: string,
		messages: Anthropic.Messages.MessageParam[]
	): ApiStream {
		// o1 supports developer prompt with formatting
		// o1-preview and o1-mini only support user messages
		const isOriginalO1 = modelId === "o1"
		const response = await this.client.chat.completions.create({
			model: modelId,
			messages: [
				{ role: modelId === "o1" ? "developer" : "user", content: systemPrompt },
				{
					role: isOriginalO1 ? "developer" : "user",
					content: isOriginalO1 ? `Formatting re-enabled\n${systemPrompt}` : systemPrompt,
				},
				...convertToOpenAiMessages(messages),
			],
		})

		yield* this.yieldResponseData(response)
	}

	private async *handleO3FamilyMessage(
		modelId: string,
		systemPrompt: string,
		messages: Anthropic.Messages.MessageParam[]
	): ApiStream {
		const stream = await this.client.chat.completions.create({
			model: "o3-mini",
			messages: [
				{
					role: "developer",
					content: `Formatting re-enabled\n${systemPrompt}`,
				},
				...convertToOpenAiMessages(messages),
			],
			stream: true,
			stream_options: { include_usage: true },
			reasoning_effort: this.getModel().info.reasoningEffort,
		})

		yield* this.handleStreamResponse(stream)
	}
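Both the handler and the test pin model: "o3-mini" while reasoning_effort comes from this.getModel().info. The pseudo-ids handled elsewhere in this diff (o3-mini-low, o3-mini-high) presumably differ only in that field; an assumed sketch of the mapping, not code from this commit:

	// Assumption for illustration: all three ids resolve to the real "o3-mini"
	// model and vary only the reasoningEffort carried in their ModelInfo.
	const assumedO3MiniEffort: Record<string, "low" | "medium" | "high"> = {
		"o3-mini": "medium", // matches the reasoning_effort asserted in the test above
		"o3-mini-low": "low",
		"o3-mini-high": "high",
	}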

	private async *handleDefaultModelMessage(
		modelId: string,
		systemPrompt: string,
		messages: Anthropic.Messages.MessageParam[]
	): ApiStream {
		const stream = await this.client.chat.completions.create({
			model: modelId,
			temperature: 0,
			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
			stream: true,
			stream_options: { include_usage: true },
		})

		yield* this.handleStreamResponse(stream)
	}

	private async *yieldResponseData(
		response: OpenAI.Chat.Completions.ChatCompletion
	): ApiStream {
		yield {
			type: "text",
			text: response.choices[0]?.message.content || "",
@@ -46,19 +110,11 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
			inputTokens: response.usage?.prompt_tokens || 0,
			outputTokens: response.usage?.completion_tokens || 0,
				}
				break
			}
			case "o3-mini":
			case "o3-mini-low":
			case "o3-mini-high": {
				const stream = await this.client.chat.completions.create({
					model: "o3-mini",
					messages: [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
					stream: true,
					stream_options: { include_usage: true },
					reasoning_effort: this.getModel().info.reasoningEffort,
				})

	private async *handleStreamResponse(
		stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>
	): ApiStream {
		for await (const chunk of stream) {
			const delta = chunk.choices[0]?.delta
			if (delta?.content) {
@@ -68,7 +124,6 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
				}
			}

			// chunk.usage is null for every chunk except the last, which carries the token usage statistics for the entire request
			if (chunk.usage) {
				yield {
					type: "usage",
@@ -77,38 +132,6 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
				}
			}
		}
				break
			}
			default: {
				const stream = await this.client.chat.completions.create({
					model: this.getModel().id,
					// max_completion_tokens: this.getModel().info.maxTokens,
					temperature: 0,
					messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
					stream: true,
					stream_options: { include_usage: true },
				})

				for await (const chunk of stream) {
					const delta = chunk.choices[0]?.delta
					if (delta?.content) {
						yield {
							type: "text",
							text: delta.content,
						}
					}

					// chunk.usage is null for every chunk except the last, which carries the token usage statistics for the entire request
					if (chunk.usage) {
						yield {
							type: "usage",
							inputTokens: chunk.usage.prompt_tokens || 0,
							outputTokens: chunk.usage.completion_tokens || 0,
						}
					}
				}
			}
		}
	}

	getModel(): { id: OpenAiNativeModelId; info: ModelInfo } {
@@ -125,32 +148,12 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
		const modelId = this.getModel().id
		let requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming

		switch (modelId) {
			case "o1":
			case "o1-preview":
			case "o1-mini":
				// o1 doesn't support non-1 temp
				requestOptions = {
					model: modelId,
					messages: [{ role: "user", content: prompt }],
				}
				break
			case "o3-mini":
			case "o3-mini-low":
			case "o3-mini-high":
				// o3 doesn't support non-1 temp
				requestOptions = {
					model: "o3-mini",
					messages: [{ role: "user", content: prompt }],
					reasoning_effort: this.getModel().info.reasoningEffort,
				}
				break
			default:
				requestOptions = {
					model: modelId,
					messages: [{ role: "user", content: prompt }],
					temperature: 0,
				}
		if (modelId.startsWith("o1")) {
			requestOptions = this.getO1CompletionOptions(modelId, prompt)
		} else if (modelId.startsWith("o3-mini")) {
			requestOptions = this.getO3CompletionOptions(modelId, prompt)
		} else {
			requestOptions = this.getDefaultCompletionOptions(modelId, prompt)
		}

		const response = await this.client.chat.completions.create(requestOptions)
@@ -162,4 +165,36 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
			throw error
		}
	}

	private getO1CompletionOptions(
		modelId: string,
		prompt: string
	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
		return {
			model: modelId,
			messages: [{ role: "user", content: prompt }],
		}
	}

	private getO3CompletionOptions(
		modelId: string,
		prompt: string
	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
		return {
			model: "o3-mini",
			messages: [{ role: "user", content: prompt }],
			reasoning_effort: this.getModel().info.reasoningEffort,
		}
	}

	private getDefaultCompletionOptions(
		modelId: string,
		prompt: string
	): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming {
		return {
			model: modelId,
			messages: [{ role: "user", content: prompt }],
			temperature: 0,
		}
	}
}
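Every handleXxxFamilyMessage path ultimately yields through ApiStream, so callers consume createMessage the same way regardless of model family. A minimal consumption sketch (caller shape assumed; the chunk fields are taken from the yields above):

	// Sketch only: accumulate text deltas and read the final usage chunk.
	let text = ""
	for await (const chunk of handler.createMessage(systemPrompt, messages)) {
		if (chunk.type === "text") {
			text += chunk.text
		} else if (chunk.type === "usage") {
			console.log(`tokens in/out: ${chunk.inputTokens}/${chunk.outputTokens}`)
		}
	}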
@@ -1,37 +1,9 @@
import { DiffStrategy, DiffResult } from "../types"
import { addLineNumbers, everyLineHasLineNumbers, stripLineNumbers } from "../../../integrations/misc/extract-text"
import { distance } from "fastest-levenshtein"

const BUFFER_LINES = 20 // Number of extra context lines to show before and after matches

function levenshteinDistance(a: string, b: string): number {
	const matrix: number[][] = []

	// Initialize matrix
	for (let i = 0; i <= a.length; i++) {
		matrix[i] = [i]
	}
	for (let j = 0; j <= b.length; j++) {
		matrix[0][j] = j
	}

	// Fill matrix
	for (let i = 1; i <= a.length; i++) {
		for (let j = 1; j <= b.length; j++) {
			if (a[i - 1] === b[j - 1]) {
				matrix[i][j] = matrix[i - 1][j - 1]
			} else {
				matrix[i][j] = Math.min(
					matrix[i - 1][j - 1] + 1, // substitution
					matrix[i][j - 1] + 1, // insertion
					matrix[i - 1][j] + 1, // deletion
				)
			}
		}
	}

	return matrix[a.length][b.length]
}

function getSimilarity(original: string, search: string): number {
	if (search === "") {
		return 1
@@ -47,12 +19,12 @@ function getSimilarity(original: string, search: string): number {
		return 1
	}

	// Calculate Levenshtein distance
	const distance = levenshteinDistance(normalizedOriginal, normalizedSearch)
	// Calculate Levenshtein distance using fastest-levenshtein's distance function
	const dist = distance(normalizedOriginal, normalizedSearch)

	// Calculate similarity ratio (0 to 1, where 1 is exact match)
	// Calculate similarity ratio (0 to 1, where 1 is an exact match)
	const maxLength = Math.max(normalizedOriginal.length, normalizedSearch.length)
	return 1 - distance / maxLength
	return 1 - dist / maxLength
}
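The swap from the hand-rolled matrix implementation to fastest-levenshtein keeps the similarity formula unchanged. A quick worked example (the distance value is a standard Levenshtein fact, not taken from this diff):

	import { distance } from "fastest-levenshtein"

	// "kitten" -> "sitting" takes 3 edits (two substitutions, one insertion).
	const dist = distance("kitten", "sitting") // 3
	// Similarity as computed in getSimilarity: 1 - distance / maxLength.
	const similarity = 1 - dist / Math.max("kitten".length, "sitting".length) // 1 - 3/7 ≈ 0.571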
export class SearchReplaceDiffStrategy implements DiffStrategy {
@@ -472,13 +472,11 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
					<div
						style={{
							display: "flex",
							gap: "16px",
							gap: "8px",
							alignItems: "center",
							marginBottom: "12px",
							overflowX: "auto",
							flexWrap: "nowrap",
							paddingBottom: "4px",
							paddingRight: "20px",
							flexWrap: "wrap",
							padding: "4px 0",
						}}>
						{modes.map((modeConfig) => {
							const isActive = mode === modeConfig.slug
@@ -859,13 +857,11 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
					<div
						style={{
							display: "flex",
							gap: "16px",
							gap: "8px",
							alignItems: "center",
							marginBottom: "12px",
							overflowX: "auto",
							flexWrap: "nowrap",
							paddingBottom: "4px",
							paddingRight: "20px",
							flexWrap: "wrap",
							padding: "4px 0",
						}}>
						{Object.keys(supportPrompt.default).map((type) => (
							<button
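The identical style change lands in both button rows: dropping overflowX: "auto" and flexWrap: "nowrap" in favor of flexWrap: "wrap" lets the buttons flow onto additional rows instead of scrolling horizontally, which is the "visual cleanup" named in the changeset. A simplified sketch of the resulting markup (element names assumed):

	// Illustration only: the new style object applied to the mode list.
	<div style={{ display: "flex", gap: "8px", alignItems: "center", marginBottom: "12px", flexWrap: "wrap", padding: "4px 0" }}>
		{modes.map((modeConfig) => (
			<button key={modeConfig.slug}>{modeConfig.name}</button>
		))}
	</div>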