merge(upstream): merge upstream changes keeping VSCode LM provider and adding Glama support

RaySinner
2025-01-07 01:54:46 +03:00
29 changed files with 2040 additions and 280 deletions


@@ -1,5 +1,17 @@
# Roo Cline Changelog
## [2.2.42]
- Add a Git section to the context mentions
## [2.2.41]
- Checkbox to disable streaming for OpenAI-compatible providers
## [2.2.40]
- Add the Glama provider (thanks @punkpeye!)
## [2.2.39]
- Add toggle to enable/disable the MCP-related sections of the system prompt (thanks @daniel-lxs!)


@@ -6,6 +6,7 @@ A fork of Cline, an autonomous coding agent, with some additional experimental features
- Drag and drop images into chats
- Delete messages from chats
- @-mention Git commits to include their context in the chat
- "Enhance prompt" button (OpenRouter models only for now)
- Sound effects for feedback
- Option to use browsers of different sizes and adjust screenshot quality
@@ -16,7 +17,9 @@ A fork of Cline, an autonomous coding agent, with some additional experimental features
- Language selection for Cline's communication (English, Japanese, Spanish, French, German, and more)
- Support for DeepSeek V3
- Support for Amazon Nova and Meta Llama 3, 3.1, and 3.2 models via AWS Bedrock
- Support for Glama
- Support for listing models from OpenAI-compatible providers
- Support for adding OpenAI-compatible models with or without streaming
- Per-tool MCP auto-approval
- Enable/disable individual MCP servers
- Enable/disable the MCP feature overall
@@ -135,7 +138,7 @@ Thanks to [Claude 3.5 Sonnet's agentic coding capabilities](https://www-cdn.ant
### Use any API and Model
Cline supports API providers like OpenRouter, Anthropic, OpenAI, Google Gemini, AWS Bedrock, Azure, and GCP Vertex. You can also configure any OpenAI compatible API, or use a local model through LM Studio/Ollama. If you're using OpenRouter, the extension fetches their latest model list, allowing you to use the newest models as soon as they're available.
Cline supports API providers like OpenRouter, Anthropic, Glama, OpenAI, Google Gemini, AWS Bedrock, Azure, and GCP Vertex. You can also configure any OpenAI compatible API, or use a local model through LM Studio/Ollama. If you're using OpenRouter, the extension fetches their latest model list, allowing you to use the newest models as soon as they're available.
The extension also keeps track of total tokens and API usage cost for the entire task loop and individual requests, keeping you informed of spend every step of the way.

package-lock.json (generated, 6 changed lines)

@@ -1,12 +1,12 @@
{
"name": "roo-cline",
"version": "2.2.39",
"version": "2.2.42",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "roo-cline",
"version": "2.2.39",
"version": "2.2.42",
"dependencies": {
"@anthropic-ai/bedrock-sdk": "^0.10.2",
"@anthropic-ai/sdk": "^0.26.0",
@@ -31,7 +31,7 @@
"isbinaryfile": "^5.0.2",
"mammoth": "^1.8.0",
"monaco-vscode-textmate-theme-converter": "^0.1.7",
"openai": "^4.61.0",
"openai": "^4.73.1",
"os-name": "^6.0.0",
"p-wait-for": "^5.0.2",
"pdf-parse": "^1.1.1",


@@ -3,7 +3,7 @@
"displayName": "Roo Cline",
"description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.",
"publisher": "RooVeterinaryInc",
"version": "2.2.39",
"version": "2.2.42",
"icon": "assets/icons/rocket.png",
"galleryBanner": {
"color": "#617A91",
@@ -231,7 +231,7 @@
"isbinaryfile": "^5.0.2",
"mammoth": "^1.8.0",
"monaco-vscode-textmate-theme-converter": "^0.1.7",
"openai": "^4.61.0",
"openai": "^4.73.1",
"os-name": "^6.0.0",
"p-wait-for": "^5.0.2",
"pdf-parse": "^1.1.1",


@@ -1,4 +1,5 @@
import { Anthropic } from "@anthropic-ai/sdk"
import { GlamaHandler } from "./providers/glama"
import { ApiConfiguration, ModelInfo } from "../shared/api"
import { AnthropicHandler } from "./providers/anthropic"
import { AwsBedrockHandler } from "./providers/bedrock"
@@ -28,6 +29,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
switch (apiProvider) {
case "anthropic":
return new AnthropicHandler(options)
case "glama":
return new GlamaHandler(options)
case "openrouter":
return new OpenRouterHandler(options)
case "bedrock":


@@ -0,0 +1,192 @@
import { OpenAiHandler } from '../openai'
import { ApiHandlerOptions, openAiModelInfoSaneDefaults } from '../../../shared/api'
import OpenAI, { AzureOpenAI } from 'openai'
import { Anthropic } from '@anthropic-ai/sdk'
// Mock dependencies
jest.mock('openai')
describe('OpenAiHandler', () => {
const mockOptions: ApiHandlerOptions = {
openAiApiKey: 'test-key',
openAiModelId: 'gpt-4',
openAiStreamingEnabled: true,
openAiBaseUrl: 'https://api.openai.com/v1'
}
beforeEach(() => {
jest.clearAllMocks()
})
test('constructor initializes with correct options', () => {
const handler = new OpenAiHandler(mockOptions)
expect(handler).toBeInstanceOf(OpenAiHandler)
expect(OpenAI).toHaveBeenCalledWith({
apiKey: mockOptions.openAiApiKey,
baseURL: mockOptions.openAiBaseUrl
})
})
test('constructor initializes Azure client when Azure URL is provided', () => {
const azureOptions: ApiHandlerOptions = {
...mockOptions,
openAiBaseUrl: 'https://example.azure.com',
azureApiVersion: '2023-05-15'
}
const handler = new OpenAiHandler(azureOptions)
expect(handler).toBeInstanceOf(OpenAiHandler)
expect(AzureOpenAI).toHaveBeenCalledWith({
baseURL: azureOptions.openAiBaseUrl,
apiKey: azureOptions.openAiApiKey,
apiVersion: azureOptions.azureApiVersion
})
})
test('getModel returns correct model info', () => {
const handler = new OpenAiHandler(mockOptions)
const result = handler.getModel()
expect(result).toEqual({
id: mockOptions.openAiModelId,
info: openAiModelInfoSaneDefaults
})
})
test('createMessage handles streaming correctly when enabled', async () => {
const handler = new OpenAiHandler({
...mockOptions,
openAiStreamingEnabled: true,
includeMaxTokens: true
})
const mockStream = {
async *[Symbol.asyncIterator]() {
yield {
choices: [{
delta: {
content: 'test response'
}
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5
}
}
}
}
const mockCreate = jest.fn().mockResolvedValue(mockStream)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
const systemPrompt = 'test system prompt'
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'test message' }
]
const generator = handler.createMessage(systemPrompt, messages)
const chunks = []
for await (const chunk of generator) {
chunks.push(chunk)
}
expect(chunks).toEqual([
{
type: 'text',
text: 'test response'
},
{
type: 'usage',
inputTokens: 10,
outputTokens: 5
}
])
expect(mockCreate).toHaveBeenCalledWith({
model: mockOptions.openAiModelId,
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: 'test message' }
],
temperature: 0,
stream: true,
stream_options: { include_usage: true },
max_tokens: openAiModelInfoSaneDefaults.maxTokens
})
})
test('createMessage handles non-streaming correctly when disabled', async () => {
const handler = new OpenAiHandler({
...mockOptions,
openAiStreamingEnabled: false
})
const mockResponse = {
choices: [{
message: {
content: 'test response'
}
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5
}
}
const mockCreate = jest.fn().mockResolvedValue(mockResponse)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
const systemPrompt = 'test system prompt'
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'test message' }
]
const generator = handler.createMessage(systemPrompt, messages)
const chunks = []
for await (const chunk of generator) {
chunks.push(chunk)
}
expect(chunks).toEqual([
{
type: 'text',
text: 'test response'
},
{
type: 'usage',
inputTokens: 10,
outputTokens: 5
}
])
expect(mockCreate).toHaveBeenCalledWith({
model: mockOptions.openAiModelId,
messages: [
{ role: 'user', content: systemPrompt },
{ role: 'user', content: 'test message' }
]
})
})
test('createMessage handles API errors', async () => {
const handler = new OpenAiHandler(mockOptions)
const mockStream = {
async *[Symbol.asyncIterator]() {
throw new Error('API Error')
}
}
const mockCreate = jest.fn().mockResolvedValue(mockStream)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
const generator = handler.createMessage('test', [])
await expect(generator.next()).rejects.toThrow('API Error')
})
})

src/api/providers/glama.ts (new file, 132 lines)

@@ -0,0 +1,132 @@
import { Anthropic } from "@anthropic-ai/sdk"
import axios from "axios"
import OpenAI from "openai"
import { ApiHandler } from "../"
import { ApiHandlerOptions, ModelInfo, glamaDefaultModelId, glamaDefaultModelInfo } from "../../shared/api"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
import delay from "delay"
export class GlamaHandler implements ApiHandler {
private options: ApiHandlerOptions
private client: OpenAI
constructor(options: ApiHandlerOptions) {
this.options = options
this.client = new OpenAI({
baseURL: "https://glama.ai/api/gateway/openai/v1",
apiKey: this.options.glamaApiKey,
})
}
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
// Convert Anthropic messages to OpenAI format
const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt },
...convertToOpenAiMessages(messages),
]
// this is specifically for claude models (some models may 'support prompt caching' automatically without this)
if (this.getModel().id.startsWith("anthropic/claude-3")) {
openAiMessages[0] = {
role: "system",
content: [
{
type: "text",
text: systemPrompt,
// @ts-ignore-next-line
cache_control: { type: "ephemeral" },
},
],
}
// Add cache_control to the last two user messages
// (note: this works because we only ever add one user message at a time,
// but if we added multiple we'd need to mark the user message before the last assistant message)
const lastTwoUserMessages = openAiMessages.filter((msg) => msg.role === "user").slice(-2)
lastTwoUserMessages.forEach((msg) => {
if (typeof msg.content === "string") {
msg.content = [{ type: "text", text: msg.content }]
}
if (Array.isArray(msg.content)) {
// NOTE: this is fine since env details will always be added at the end.
// but if it weren't there, and the user added an image_url type message,
// it would pop a text part before it and then move it after to the end.
let lastTextPart = msg.content.filter((part) => part.type === "text").pop()
if (!lastTextPart) {
lastTextPart = { type: "text", text: "..." }
msg.content.push(lastTextPart)
}
// @ts-ignore-next-line
lastTextPart["cache_control"] = { type: "ephemeral" }
}
})
}
// Required by Anthropic
// Other providers default to max tokens allowed.
let maxTokens: number | undefined
if (this.getModel().id.startsWith("anthropic/")) {
maxTokens = 8_192
}
const { data: completion, response } = await this.client.chat.completions.create({
model: this.getModel().id,
max_tokens: maxTokens,
temperature: 0,
messages: openAiMessages,
stream: true,
}).withResponse();
const completionRequestId = response.headers.get(
'x-completion-request-id',
);
for await (const chunk of completion) {
const delta = chunk.choices[0]?.delta
if (delta?.content) {
yield {
type: "text",
text: delta.content,
}
}
}
try {
const response = await axios.get(`https://glama.ai/api/gateway/v1/completion-requests/${completionRequestId}`, {
headers: {
Authorization: `Bearer ${this.options.glamaApiKey}`,
},
})
const completionRequest = response.data;
if (completionRequest.tokenUsage) {
yield {
type: "usage",
cacheWriteTokens: completionRequest.tokenUsage.cacheCreationInputTokens,
cacheReadTokens: completionRequest.tokenUsage.cacheReadInputTokens,
inputTokens: completionRequest.tokenUsage.promptTokens,
outputTokens: completionRequest.tokenUsage.completionTokens,
totalCost: parseFloat(completionRequest.totalCostUsd),
}
}
} catch (error) {
console.error("Error fetching Glama completion details", error)
}
}
getModel(): { id: string; info: ModelInfo } {
const modelId = this.options.glamaModelId
const modelInfo = this.options.glamaModelInfo
if (modelId && modelInfo) {
return { id: modelId, info: modelInfo }
}
return { id: glamaDefaultModelId, info: glamaDefaultModelInfo }
}
}
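
A minimal consumption sketch for the new handler, using only what the code above defines; the key, model choice, and prompts are placeholders:

// Stream a reply through the Glama gateway, printing text chunks and final token usage.
const handler = new GlamaHandler({ glamaApiKey: "<your-key>", glamaModelId: "anthropic/claude-3-5-sonnet" })
for await (const chunk of handler.createMessage("You are a concise assistant.", [{ role: "user", content: "Hello" }])) {
    if (chunk.type === "text") process.stdout.write(chunk.text)
    if (chunk.type === "usage") console.log(`\ntokens in/out: ${chunk.inputTokens}/${chunk.outputTokens}`)
}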


@@ -32,42 +32,64 @@ export class OpenAiHandler implements ApiHandler {
}
}
// Include stream_options for OpenAI Compatible providers if the checkbox is checked
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt },
...convertToOpenAiMessages(messages),
]
const modelInfo = this.getModel().info
const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = {
model: this.options.openAiModelId ?? "",
messages: openAiMessages,
temperature: 0,
stream: true,
}
if (this.options.includeMaxTokens) {
requestOptions.max_tokens = modelInfo.maxTokens
}
const modelId = this.options.openAiModelId ?? ""
if (this.options.includeStreamOptions ?? true) {
requestOptions.stream_options = { include_usage: true }
}
if (this.options.openAiStreamingEnabled ?? true) {
const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
role: "system",
content: systemPrompt
}
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
model: modelId,
temperature: 0,
messages: [systemMessage, ...convertToOpenAiMessages(messages)],
stream: true as const,
stream_options: { include_usage: true },
}
if (this.options.includeMaxTokens) {
requestOptions.max_tokens = modelInfo.maxTokens
}
const stream = await this.client.chat.completions.create(requestOptions)
for await (const chunk of stream) {
const delta = chunk.choices[0]?.delta
if (delta?.content) {
yield {
type: "text",
text: delta.content,
const stream = await this.client.chat.completions.create(requestOptions)
for await (const chunk of stream) {
const delta = chunk.choices[0]?.delta
if (delta?.content) {
yield {
type: "text",
text: delta.content,
}
}
if (chunk.usage) {
yield {
type: "usage",
inputTokens: chunk.usage.prompt_tokens || 0,
outputTokens: chunk.usage.completion_tokens || 0,
}
}
}
if (chunk.usage) {
yield {
type: "usage",
inputTokens: chunk.usage.prompt_tokens || 0,
outputTokens: chunk.usage.completion_tokens || 0,
}
} else {
// o1, for instance, doesn't support streaming, a non-1 temperature, or a system prompt
const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
role: "user",
content: systemPrompt
}
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
model: modelId,
messages: [systemMessage, ...convertToOpenAiMessages(messages)],
}
const response = await this.client.chat.completions.create(requestOptions)
yield {
type: "text",
text: response.choices[0]?.message.content || "",
}
yield {
type: "usage",
inputTokens: response.usage?.prompt_tokens || 0,
outputTokens: response.usage?.completion_tokens || 0,
}
}
}
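
Condensed, the two request shapes this method now produces (fields taken from the code above; message contents are illustrative):

// openAiStreamingEnabled !== false (the default): streaming request
//   { model, temperature: 0, messages: [system, ...converted], stream: true, stream_options: { include_usage: true } }
// openAiStreamingEnabled === false: single-shot request for o1-style models
//   { model, messages: [{ role: "user", content: systemPrompt }, ...converted] } // no temperature, no stream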


@@ -12,7 +12,7 @@ import { ApiHandler, SingleCompletionHandler, buildApiHandler } from "../api"
import { ApiStream } from "../api/transform/stream"
import { DiffViewProvider } from "../integrations/editor/DiffViewProvider"
import { findToolName, formatContentBlockToMarkdown } from "../integrations/misc/export-markdown"
import { extractTextFromFile, addLineNumbers, stripLineNumbers, everyLineHasLineNumbers } from "../integrations/misc/extract-text"
import { extractTextFromFile, addLineNumbers, stripLineNumbers, everyLineHasLineNumbers, truncateOutput } from "../integrations/misc/extract-text"
import { TerminalManager } from "../integrations/terminal/TerminalManager"
import { UrlContentFetcher } from "../services/browser/UrlContentFetcher"
import { listFiles } from "../services/glob/list-files"
@@ -716,22 +716,6 @@ export class Cline {
}
})
const getFormattedOutput = async () => {
const { terminalOutputLineLimit } = await this.providerRef.deref()?.getState() ?? {}
const limit = terminalOutputLineLimit ?? 0
if (limit > 0 && lines.length > limit) {
const beforeLimit = Math.floor(limit * 0.2) // 20% of lines before
const afterLimit = limit - beforeLimit // remaining 80% after
return [
...lines.slice(0, beforeLimit),
`\n[...${lines.length - limit} lines omitted...]\n`,
...lines.slice(-afterLimit)
].join('\n')
}
return lines.join('\n')
}
let completed = false
process.once("completed", () => {
completed = true
@@ -750,7 +734,8 @@ export class Cline {
// grouping command_output messages despite any gaps anyways)
await delay(50)
const output = await getFormattedOutput()
const { terminalOutputLineLimit } = await this.providerRef.deref()?.getState() ?? {}
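// truncateOutput (added to extract-text in this commit) keeps ~20% of the line limit from the head and ~80% from the tail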
const output = truncateOutput(lines.join('\n'), terminalOutputLineLimit)
const result = output.trim()
if (userFeedback) {


@@ -0,0 +1,155 @@
// Create mock vscode module before importing anything
const createMockUri = (scheme: string, path: string) => ({
scheme,
authority: '',
path,
query: '',
fragment: '',
fsPath: path,
with: jest.fn(),
toString: () => path,
toJSON: () => ({
scheme,
authority: '',
path,
query: '',
fragment: ''
})
})
const mockExecuteCommand = jest.fn()
const mockOpenExternal = jest.fn()
const mockShowErrorMessage = jest.fn()
const mockVscode = {
workspace: {
workspaceFolders: [{
uri: { fsPath: "/test/workspace" }
}]
},
window: {
showErrorMessage: mockShowErrorMessage,
showInformationMessage: jest.fn(),
showWarningMessage: jest.fn(),
createTextEditorDecorationType: jest.fn(),
createOutputChannel: jest.fn(),
createWebviewPanel: jest.fn(),
activeTextEditor: undefined
},
commands: {
executeCommand: mockExecuteCommand
},
env: {
openExternal: mockOpenExternal
},
Uri: {
parse: jest.fn((url: string) => createMockUri('https', url)),
file: jest.fn((path: string) => createMockUri('file', path))
},
Position: jest.fn(),
Range: jest.fn(),
TextEdit: jest.fn(),
WorkspaceEdit: jest.fn(),
DiagnosticSeverity: {
Error: 0,
Warning: 1,
Information: 2,
Hint: 3
}
}
// Mock modules
jest.mock('vscode', () => mockVscode)
jest.mock("../../../services/browser/UrlContentFetcher")
jest.mock("../../../utils/git")
// Now import the modules that use the mocks
import { parseMentions, openMention } from "../index"
import { UrlContentFetcher } from "../../../services/browser/UrlContentFetcher"
import * as git from "../../../utils/git"
describe("mentions", () => {
const mockCwd = "/test/workspace"
let mockUrlContentFetcher: UrlContentFetcher
beforeEach(() => {
jest.clearAllMocks()
// Create a mock instance with just the methods we need
mockUrlContentFetcher = {
launchBrowser: jest.fn().mockResolvedValue(undefined),
closeBrowser: jest.fn().mockResolvedValue(undefined),
urlToMarkdown: jest.fn().mockResolvedValue(""),
} as unknown as UrlContentFetcher
})
describe("parseMentions", () => {
it("should parse git commit mentions", async () => {
const commitHash = "abc1234"
const commitInfo = `abc1234 Fix bug in parser
Author: John Doe
Date: Mon Jan 5 23:50:06 2025 -0500
Detailed commit message with multiple lines
- Fixed parsing issue
- Added tests`
jest.mocked(git.getCommitInfo).mockResolvedValue(commitInfo)
const result = await parseMentions(
`Check out this commit @${commitHash}`,
mockCwd,
mockUrlContentFetcher
)
expect(result).toContain(`'${commitHash}' (see below for commit info)`)
expect(result).toContain(`<git_commit hash="${commitHash}">`)
expect(result).toContain(commitInfo)
})
it("should handle errors fetching git info", async () => {
const commitHash = "abc1234"
const errorMessage = "Failed to get commit info"
jest.mocked(git.getCommitInfo).mockRejectedValue(new Error(errorMessage))
const result = await parseMentions(
`Check out this commit @${commitHash}`,
mockCwd,
mockUrlContentFetcher
)
expect(result).toContain(`'${commitHash}' (see below for commit info)`)
expect(result).toContain(`<git_commit hash="${commitHash}">`)
expect(result).toContain(`Error fetching commit info: ${errorMessage}`)
})
})
describe("openMention", () => {
it("should handle file paths and problems", async () => {
await openMention("/path/to/file")
expect(mockExecuteCommand).not.toHaveBeenCalled()
expect(mockOpenExternal).not.toHaveBeenCalled()
expect(mockShowErrorMessage).toHaveBeenCalledWith("Could not open file!")
await openMention("problems")
expect(mockExecuteCommand).toHaveBeenCalledWith("workbench.actions.view.problems")
})
it("should handle URLs", async () => {
const url = "https://example.com"
await openMention(url)
const mockUri = mockVscode.Uri.parse(url)
expect(mockOpenExternal).toHaveBeenCalled()
const calledArg = mockOpenExternal.mock.calls[0][0]
expect(calledArg).toEqual(expect.objectContaining({
scheme: mockUri.scheme,
authority: mockUri.authority,
path: mockUri.path,
query: mockUri.query,
fragment: mockUri.fragment
}))
})
})
})


@@ -2,27 +2,28 @@ import * as vscode from "vscode"
import * as path from "path"
import { openFile } from "../../integrations/misc/open-file"
import { UrlContentFetcher } from "../../services/browser/UrlContentFetcher"
import { mentionRegexGlobal } from "../../shared/context-mentions"
import { mentionRegexGlobal, formatGitSuggestion, type MentionSuggestion } from "../../shared/context-mentions"
import fs from "fs/promises"
import { extractTextFromFile } from "../../integrations/misc/extract-text"
import { isBinaryFile } from "isbinaryfile"
import { diagnosticsToProblemsString } from "../../integrations/diagnostics"
import { getCommitInfo, getWorkingState } from "../../utils/git"
export function openMention(mention?: string): void {
export async function openMention(mention?: string): Promise<void> {
if (!mention) {
return
}
const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0)
if (!cwd) {
return
}
if (mention.startsWith("/")) {
const relPath = mention.slice(1)
const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0)
if (!cwd) {
return
}
const absPath = path.resolve(cwd, relPath)
if (mention.endsWith("/")) {
vscode.commands.executeCommand("revealInExplorer", vscode.Uri.file(absPath))
// vscode.commands.executeCommand("vscode.openFolder", , { forceNewWindow: false }) opens in new window
} else {
openFile(absPath)
}
@@ -40,12 +41,16 @@ export async function parseMentions(text: string, cwd: string, urlContentFetcher
if (mention.startsWith("http")) {
return `'${mention}' (see below for site content)`
} else if (mention.startsWith("/")) {
const mentionPath = mention.slice(1) // Remove the leading '/'
const mentionPath = mention.slice(1)
return mentionPath.endsWith("/")
? `'${mentionPath}' (see below for folder content)`
: `'${mentionPath}' (see below for file content)`
} else if (mention === "problems") {
return `Workspace Problems (see below for diagnostics)`
} else if (mention === "git-changes") {
return `Working directory changes (see below for details)`
} else if (/^[a-f0-9]{7,40}$/.test(mention)) {
return `Git commit '${mention}' (see below for commit info)`
}
return match
})
@@ -99,6 +104,20 @@ export async function parseMentions(text: string, cwd: string, urlContentFetcher
} catch (error) {
parsedText += `\n\n<workspace_diagnostics>\nError fetching diagnostics: ${error.message}\n</workspace_diagnostics>`
}
} else if (mention === "git-changes") {
try {
const workingState = await getWorkingState(cwd)
parsedText += `\n\n<git_working_state>\n${workingState}\n</git_working_state>`
} catch (error) {
parsedText += `\n\n<git_working_state>\nError fetching working state: ${error.message}\n</git_working_state>`
}
} else if (/^[a-f0-9]{7,40}$/.test(mention)) {
try {
const commitInfo = await getCommitInfo(mention, cwd)
parsedText += `\n\n<git_commit hash="${mention}">\n${commitInfo}\n</git_commit>`
} catch (error) {
parsedText += `\n\n<git_commit hash="${mention}">\nError fetching commit info: ${error.message}\n</git_commit>`
}
}
}
@@ -137,7 +156,6 @@ async function getFileOrFolderContent(mentionPath: string, cwd: string): Promise
folderContent += `${linePrefix}${entry.name}\n`
const filePath = path.join(mentionPath, entry.name)
const absoluteFilePath = path.resolve(absPath, entry.name)
// const relativeFilePath = path.relative(cwd, absoluteFilePath);
fileContentPromises.push(
(async () => {
try {
@@ -154,7 +172,6 @@ async function getFileOrFolderContent(mentionPath: string, cwd: string): Promise
)
} else if (entry.isDirectory()) {
folderContent += `${linePrefix}${entry.name}/\n`
// not recursively getting folder contents
} else {
folderContent += `${linePrefix}${entry.name}\n`
}


@@ -24,6 +24,7 @@ import { getNonce } from "./getNonce"
import { getUri } from "./getUri"
import { playSound, setSoundEnabled, setSoundVolume } from "../../utils/sound"
import { enhancePrompt } from "../../utils/enhance-prompt"
import { getCommitInfo, searchCommits, getWorkingState } from "../../utils/git"
/*
https://github.com/microsoft/vscode-webview-ui-toolkit-samples/blob/main/default/weather-webview/src/providers/WeatherViewProvider.ts
@@ -33,6 +34,7 @@ https://github.com/KumarVariable/vscode-extension-sidebar-html/blob/master/src/c
type SecretKey =
| "apiKey"
| "glamaApiKey"
| "openRouterApiKey"
| "awsAccessKey"
| "awsSecretKey"
@@ -45,6 +47,8 @@ type SecretKey =
type GlobalStateKey =
| "apiProvider"
| "apiModelId"
| "glamaModelId"
| "glamaModelInfo"
| "awsRegion"
| "awsUseCrossRegionInference"
| "vertexProjectId"
@@ -64,7 +68,7 @@ type GlobalStateKey =
| "lmStudioBaseUrl"
| "anthropicBaseUrl"
| "azureApiVersion"
| "includeStreamOptions"
| "openAiStreamingEnabled"
| "openRouterModelId"
| "openRouterModelInfo"
| "openRouterUseMiddleOutTransform"
@@ -85,6 +89,7 @@ type GlobalStateKey =
export const GlobalFileNames = {
apiConversationHistory: "api_conversation_history.json",
uiMessages: "ui_messages.json",
glamaModels: "glama_models.json",
openRouterModels: "openrouter_models.json",
mcpSettings: "cline_mcp_settings.json",
}
@@ -388,6 +393,24 @@ export class ClineProvider implements vscode.WebviewViewProvider {
}
}
})
this.readGlamaModels().then((glamaModels) => {
if (glamaModels) {
this.postMessageToWebview({ type: "glamaModels", glamaModels })
}
})
this.refreshGlamaModels().then(async (glamaModels) => {
if (glamaModels) {
// update model info in state (this needs to be done here since we don't want to update state while settings is open, and we may refresh models there)
const { apiConfiguration } = await this.getState()
if (apiConfiguration.glamaModelId) {
await this.updateGlobalState(
"glamaModelInfo",
glamaModels[apiConfiguration.glamaModelId],
)
await this.postStateToWebview()
}
}
})
break
case "newTask":
// Code that should run in response to the hello message command
@@ -406,6 +429,9 @@ export class ClineProvider implements vscode.WebviewViewProvider {
apiProvider,
apiModelId,
apiKey,
glamaModelId,
glamaModelInfo,
glamaApiKey,
openRouterApiKey,
awsAccessKey,
awsSecretKey,
@@ -425,7 +451,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
geminiApiKey,
openAiNativeApiKey,
azureApiVersion,
includeStreamOptions,
openAiStreamingEnabled,
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
@@ -434,6 +460,9 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("apiProvider", apiProvider)
await this.updateGlobalState("apiModelId", apiModelId)
await this.storeSecret("apiKey", apiKey)
await this.updateGlobalState("glamaModelId", glamaModelId)
await this.updateGlobalState("glamaModelInfo", glamaModelInfo)
await this.storeSecret("glamaApiKey", glamaApiKey)
await this.storeSecret("openRouterApiKey", openRouterApiKey)
await this.storeSecret("awsAccessKey", awsAccessKey)
await this.storeSecret("awsSecretKey", awsSecretKey)
@@ -454,7 +483,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.storeSecret("openAiNativeApiKey", openAiNativeApiKey)
await this.storeSecret("deepSeekApiKey", message.apiConfiguration.deepSeekApiKey)
await this.updateGlobalState("azureApiVersion", azureApiVersion)
await this.updateGlobalState("includeStreamOptions", includeStreamOptions)
await this.updateGlobalState("openAiStreamingEnabled", openAiStreamingEnabled)
await this.updateGlobalState("openRouterModelId", openRouterModelId)
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
@@ -534,6 +563,9 @@ export class ClineProvider implements vscode.WebviewViewProvider {
const vsCodeLmModels = await this.getVsCodeLmModels()
this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
break
case "refreshGlamaModels":
await this.refreshGlamaModels()
break
case "refreshOpenRouterModels":
await this.refreshOpenRouterModels()
break
@@ -710,6 +742,24 @@ export class ClineProvider implements vscode.WebviewViewProvider {
}
}
break
case "searchCommits": {
const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0)
if (cwd) {
try {
const commits = await searchCommits(message.query || "", cwd)
await this.postMessageToWebview({
type: "commitSearchResults",
commits
})
} catch (error) {
console.error("Error searching commits:", error)
vscode.window.showErrorMessage("Failed to search commits")
}
}
break
}
}
},
null,
@@ -851,6 +901,94 @@ export class ClineProvider implements vscode.WebviewViewProvider {
return cacheDir
}
async readGlamaModels(): Promise<Record<string, ModelInfo> | undefined> {
const glamaModelsFilePath = path.join(
await this.ensureCacheDirectoryExists(),
GlobalFileNames.glamaModels,
)
const fileExists = await fileExistsAtPath(glamaModelsFilePath)
if (fileExists) {
const fileContents = await fs.readFile(glamaModelsFilePath, "utf8")
return JSON.parse(fileContents)
}
return undefined
}
async refreshGlamaModels() {
const glamaModelsFilePath = path.join(
await this.ensureCacheDirectoryExists(),
GlobalFileNames.glamaModels,
)
let models: Record<string, ModelInfo> = {}
try {
const response = await axios.get("https://glama.ai/api/gateway/v1/models")
/*
{
"added": "2024-12-24T15:12:49.324Z",
"capabilities": [
"adjustable_safety_settings",
"caching",
"code_execution",
"function_calling",
"json_mode",
"json_schema",
"system_instructions",
"tuning",
"input:audio",
"input:image",
"input:text",
"input:video",
"output:text"
],
"id": "google-vertex/gemini-1.5-flash-002",
"maxTokensInput": 1048576,
"maxTokensOutput": 8192,
"pricePerToken": {
"cacheRead": null,
"cacheWrite": null,
"input": "0.000000075",
"output": "0.0000003"
}
}
*/
if (response.data) {
const rawModels = response.data;
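// Glama reports prices in USD per token (see the sample payload above);
// parsePrice converts them to USD per million tokens, e.g. "0.000000075" becomes 0.075.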
const parsePrice = (price: any) => {
if (price) {
return parseFloat(price) * 1_000_000
}
return undefined
}
for (const rawModel of rawModels) {
const modelInfo: ModelInfo = {
maxTokens: rawModel.maxTokensOutput,
contextWindow: rawModel.maxTokensInput,
supportsImages: rawModel.capabilities?.includes("input:image"),
supportsComputerUse: rawModel.capabilities?.includes("computer_use"),
supportsPromptCache: rawModel.capabilities?.includes("caching"),
inputPrice: parsePrice(rawModel.pricePerToken?.input),
outputPrice: parsePrice(rawModel.pricePerToken?.output),
description: undefined,
cacheWritesPrice: parsePrice(rawModel.pricePerToken?.cacheWrite),
cacheReadsPrice: parsePrice(rawModel.pricePerToken?.cacheRead),
}
models[rawModel.id] = modelInfo
}
} else {
console.error("Invalid response from Glama API")
}
await fs.writeFile(glamaModelsFilePath, JSON.stringify(models))
console.log("Glama models fetched and saved", models)
} catch (error) {
console.error("Error fetching Glama models:", error)
}
await this.postMessageToWebview({ type: "glamaModels", glamaModels: models })
return models
}
async readOpenRouterModels(): Promise<Record<string, ModelInfo> | undefined> {
const openRouterModelsFilePath = path.join(
await this.ensureCacheDirectoryExists(),
@@ -1174,6 +1312,9 @@ export class ClineProvider implements vscode.WebviewViewProvider {
storedApiProvider,
apiModelId,
apiKey,
glamaApiKey,
glamaModelId,
glamaModelInfo,
openRouterApiKey,
awsAccessKey,
awsSecretKey,
@@ -1194,7 +1335,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openAiNativeApiKey,
deepSeekApiKey,
azureApiVersion,
includeStreamOptions,
openAiStreamingEnabled,
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
@@ -1222,6 +1363,9 @@ export class ClineProvider implements vscode.WebviewViewProvider {
this.getGlobalState("apiProvider") as Promise<ApiProvider | undefined>,
this.getGlobalState("apiModelId") as Promise<string | undefined>,
this.getSecret("apiKey") as Promise<string | undefined>,
this.getSecret("glamaApiKey") as Promise<string | undefined>,
this.getGlobalState("glamaModelId") as Promise<string | undefined>,
this.getGlobalState("glamaModelInfo") as Promise<ModelInfo | undefined>,
this.getSecret("openRouterApiKey") as Promise<string | undefined>,
this.getSecret("awsAccessKey") as Promise<string | undefined>,
this.getSecret("awsSecretKey") as Promise<string | undefined>,
@@ -1242,7 +1386,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
this.getSecret("openAiNativeApiKey") as Promise<string | undefined>,
this.getSecret("deepSeekApiKey") as Promise<string | undefined>,
this.getGlobalState("azureApiVersion") as Promise<string | undefined>,
this.getGlobalState("includeStreamOptions") as Promise<boolean | undefined>,
this.getGlobalState("openAiStreamingEnabled") as Promise<boolean | undefined>,
this.getGlobalState("openRouterModelId") as Promise<string | undefined>,
this.getGlobalState("openRouterModelInfo") as Promise<ModelInfo | undefined>,
this.getGlobalState("openRouterUseMiddleOutTransform") as Promise<boolean | undefined>,
@@ -1287,6 +1431,9 @@ export class ClineProvider implements vscode.WebviewViewProvider {
apiProvider,
apiModelId,
apiKey,
glamaApiKey,
glamaModelId,
glamaModelInfo,
openRouterApiKey,
awsAccessKey,
awsSecretKey,
@@ -1307,7 +1454,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openAiNativeApiKey,
deepSeekApiKey,
azureApiVersion,
includeStreamOptions,
openAiStreamingEnabled,
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
@@ -1426,6 +1573,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
}
const secretKeys: SecretKey[] = [
"apiKey",
"glamaApiKey",
"openRouterApiKey",
"awsAccessKey",
"awsSecretKey",


@@ -1,109 +1,176 @@
import { addLineNumbers, everyLineHasLineNumbers, stripLineNumbers } from '../extract-text';
import { addLineNumbers, everyLineHasLineNumbers, stripLineNumbers, truncateOutput } from '../extract-text';
describe('addLineNumbers', () => {
it('should add line numbers starting from 1 by default', () => {
const input = 'line 1\nline 2\nline 3';
const expected = '1 | line 1\n2 | line 2\n3 | line 3';
expect(addLineNumbers(input)).toBe(expected);
});
it('should add line numbers starting from 1 by default', () => {
const input = 'line 1\nline 2\nline 3';
const expected = '1 | line 1\n2 | line 2\n3 | line 3';
expect(addLineNumbers(input)).toBe(expected);
});
it('should add line numbers starting from specified line number', () => {
const input = 'line 1\nline 2\nline 3';
const expected = '10 | line 1\n11 | line 2\n12 | line 3';
expect(addLineNumbers(input, 10)).toBe(expected);
});
it('should add line numbers starting from specified line number', () => {
const input = 'line 1\nline 2\nline 3';
const expected = '10 | line 1\n11 | line 2\n12 | line 3';
expect(addLineNumbers(input, 10)).toBe(expected);
});
it('should handle empty content', () => {
expect(addLineNumbers('')).toBe('1 | ');
expect(addLineNumbers('', 5)).toBe('5 | ');
});
it('should handle empty content', () => {
expect(addLineNumbers('')).toBe('1 | ');
expect(addLineNumbers('', 5)).toBe('5 | ');
});
it('should handle single line content', () => {
expect(addLineNumbers('single line')).toBe('1 | single line');
expect(addLineNumbers('single line', 42)).toBe('42 | single line');
});
it('should handle single line content', () => {
expect(addLineNumbers('single line')).toBe('1 | single line');
expect(addLineNumbers('single line', 42)).toBe('42 | single line');
});
it('should pad line numbers based on the highest line number', () => {
const input = 'line 1\nline 2';
// When starting from 99, highest line will be 100, so needs 3 spaces padding
const expected = ' 99 | line 1\n100 | line 2';
expect(addLineNumbers(input, 99)).toBe(expected);
});
it('should pad line numbers based on the highest line number', () => {
const input = 'line 1\nline 2';
// When starting from 99, highest line will be 100, so needs 3 spaces padding
const expected = ' 99 | line 1\n100 | line 2';
expect(addLineNumbers(input, 99)).toBe(expected);
});
});
describe('everyLineHasLineNumbers', () => {
it('should return true for content with line numbers', () => {
const input = '1 | line one\n2 | line two\n3 | line three';
expect(everyLineHasLineNumbers(input)).toBe(true);
});
it('should return true for content with line numbers', () => {
const input = '1 | line one\n2 | line two\n3 | line three';
expect(everyLineHasLineNumbers(input)).toBe(true);
});
it('should return true for content with padded line numbers', () => {
const input = ' 1 | line one\n 2 | line two\n 3 | line three';
expect(everyLineHasLineNumbers(input)).toBe(true);
});
it('should return true for content with padded line numbers', () => {
const input = ' 1 | line one\n 2 | line two\n 3 | line three';
expect(everyLineHasLineNumbers(input)).toBe(true);
});
it('should return false for content without line numbers', () => {
const input = 'line one\nline two\nline three';
expect(everyLineHasLineNumbers(input)).toBe(false);
});
it('should return false for content without line numbers', () => {
const input = 'line one\nline two\nline three';
expect(everyLineHasLineNumbers(input)).toBe(false);
});
it('should return false for mixed content', () => {
const input = '1 | line one\nline two\n3 | line three';
expect(everyLineHasLineNumbers(input)).toBe(false);
});
it('should return false for mixed content', () => {
const input = '1 | line one\nline two\n3 | line three';
expect(everyLineHasLineNumbers(input)).toBe(false);
});
it('should handle empty content', () => {
expect(everyLineHasLineNumbers('')).toBe(false);
});
it('should handle empty content', () => {
expect(everyLineHasLineNumbers('')).toBe(false);
});
it('should return false for content with pipe but no line numbers', () => {
const input = 'a | b\nc | d';
expect(everyLineHasLineNumbers(input)).toBe(false);
});
it('should return false for content with pipe but no line numbers', () => {
const input = 'a | b\nc | d';
expect(everyLineHasLineNumbers(input)).toBe(false);
});
});
describe('stripLineNumbers', () => {
it('should strip line numbers from content', () => {
const input = '1 | line one\n2 | line two\n3 | line three';
const expected = 'line one\nline two\nline three';
expect(stripLineNumbers(input)).toBe(expected);
});
it('should strip line numbers from content', () => {
const input = '1 | line one\n2 | line two\n3 | line three';
const expected = 'line one\nline two\nline three';
expect(stripLineNumbers(input)).toBe(expected);
});
it('should strip padded line numbers', () => {
const input = ' 1 | line one\n 2 | line two\n 3 | line three';
const expected = 'line one\nline two\nline three';
expect(stripLineNumbers(input)).toBe(expected);
});
it('should strip padded line numbers', () => {
const input = ' 1 | line one\n 2 | line two\n 3 | line three';
const expected = 'line one\nline two\nline three';
expect(stripLineNumbers(input)).toBe(expected);
});
it('should handle content without line numbers', () => {
const input = 'line one\nline two\nline three';
expect(stripLineNumbers(input)).toBe(input);
});
it('should handle content without line numbers', () => {
const input = 'line one\nline two\nline three';
expect(stripLineNumbers(input)).toBe(input);
});
it('should handle empty content', () => {
expect(stripLineNumbers('')).toBe('');
});
it('should handle empty content', () => {
expect(stripLineNumbers('')).toBe('');
});
it('should preserve content with pipe but no line numbers', () => {
const input = 'a | b\nc | d';
expect(stripLineNumbers(input)).toBe(input);
});
it('should preserve content with pipe but no line numbers', () => {
const input = 'a | b\nc | d';
expect(stripLineNumbers(input)).toBe(input);
});
it('should handle windows-style line endings', () => {
const input = '1 | line one\r\n2 | line two\r\n3 | line three';
const expected = 'line one\r\nline two\r\nline three';
expect(stripLineNumbers(input)).toBe(expected);
});
it('should handle windows-style line endings', () => {
const input = '1 | line one\r\n2 | line two\r\n3 | line three';
const expected = 'line one\r\nline two\r\nline three';
expect(stripLineNumbers(input)).toBe(expected);
});
it('should handle content with varying line number widths', () => {
const input = ' 1 | line one\n 10 | line two\n100 | line three';
const expected = 'line one\nline two\nline three';
expect(stripLineNumbers(input)).toBe(expected);
});
it('should preserve indentation after line numbers', () => {
const input = '1 | indented line\n2 | another indented';
const expected = ' indented line\n another indented';
expect(stripLineNumbers(input)).toBe(expected);
});
it('should handle content with varying line number widths', () => {
const input = ' 1 | line one\n 10 | line two\n100 | line three';
const expected = 'line one\nline two\nline three';
expect(stripLineNumbers(input)).toBe(expected);
});
});
describe('truncateOutput', () => {
it('returns original content when no line limit provided', () => {
const content = 'line1\nline2\nline3'
expect(truncateOutput(content)).toBe(content)
})
it('returns original content when lines are under limit', () => {
const content = 'line1\nline2\nline3'
expect(truncateOutput(content, 5)).toBe(content)
})
it('truncates content with 20/80 split when over limit', () => {
// Create 25 lines of content
const lines = Array.from({ length: 25 }, (_, i) => `line${i + 1}`)
const content = lines.join('\n')
// Set limit to 10 lines
const result = truncateOutput(content, 10)
// Should keep:
// - First 2 lines (20% of 10)
// - Last 8 lines (80% of 10)
// - Omission indicator in between
const expectedLines = [
'line1',
'line2',
'',
'[...15 lines omitted...]',
'',
'line18',
'line19',
'line20',
'line21',
'line22',
'line23',
'line24',
'line25'
]
expect(result).toBe(expectedLines.join('\n'))
})
it('handles empty content', () => {
expect(truncateOutput('', 10)).toBe('')
})
it('handles single line content', () => {
expect(truncateOutput('single line', 10)).toBe('single line')
})
it('handles windows-style line endings', () => {
// Create content with windows line endings
const lines = Array.from({ length: 15 }, (_, i) => `line${i + 1}`)
const content = lines.join('\r\n')
const result = truncateOutput(content, 5)
// Should keep first line (20% of 5 = 1) and last 4 lines (80% of 5 = 4)
// Split result by either \r\n or \n to normalize line endings
const resultLines = result.split(/\r?\n/)
const expectedLines = [
'line1',
'',
'[...10 lines omitted...]',
'',
'line12',
'line13',
'line14',
'line15'
]
expect(resultLines).toEqual(expectedLines)
})
})


@@ -88,3 +88,37 @@ export function stripLineNumbers(content: string): string {
const lineEnding = content.includes('\r\n') ? '\r\n' : '\n'
return processedLines.join(lineEnding)
}
/**
* Truncates multi-line output while preserving context from both the beginning and end.
* When truncation is needed, it keeps 20% of the lines from the start and 80% from the end,
* with a clear indicator of how many lines were omitted in between.
*
* @param content The multi-line string to truncate
* @param lineLimit Optional maximum number of lines to keep. If not provided or 0, returns the original content
* @returns The truncated string with an indicator of omitted lines, or the original content if no truncation needed
*
* @example
* // With 10 line limit on 25 lines of content:
* // - Keeps first 2 lines (20% of 10)
* // - Keeps last 8 lines (80% of 10)
* // - Adds "[...15 lines omitted...]" in between
*/
export function truncateOutput(content: string, lineLimit?: number): string {
if (!lineLimit) {
return content
}
const lines = content.split('\n')
if (lines.length <= lineLimit) {
return content
}
const beforeLimit = Math.floor(lineLimit * 0.2) // 20% of lines before
const afterLimit = lineLimit - beforeLimit // remaining 80% after
return [
...lines.slice(0, beforeLimit),
`\n[...${lines.length - lineLimit} lines omitted...]\n`,
...lines.slice(-afterLimit)
].join('\n')
}
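
A worked example of the 20/80 split, consistent with the unit tests earlier in this diff:

// 25 lines with lineLimit = 10 keeps floor(10 * 0.2) = 2 head lines and 10 - 2 = 8 tail lines,
// omitting 25 - 10 = 15 lines in the middle.
const sample = Array.from({ length: 25 }, (_, i) => `line${i + 1}`).join("\n")
truncateOutput(sample, 10)
// -> "line1\nline2\n\n[...15 lines omitted...]\n\nline18\n...\nline25" (the "..." here elides line19-line24)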


@@ -3,26 +3,31 @@
import { ApiConfiguration, ModelInfo } from "./api"
import { HistoryItem } from "./HistoryItem"
import { McpServer } from "./mcp"
import { GitCommit } from "../utils/git"
// webview will hold state
export interface ExtensionMessage {
type:
| "action"
| "state"
| "selectedImages"
| "ollamaModels"
| "lmStudioModels"
| "vsCodeLmModels"
| "vsCodeLmApiAvailable"
| "requestVsCodeLmModels"
| "theme"
| "workspaceUpdated"
| "invoke"
| "partialMessage"
| "openRouterModels"
| "openAiModels"
| "mcpServers"
| "enhancedPrompt"
type:
| "action"
| "state"
| "selectedImages"
| "ollamaModels"
| "lmStudioModels"
| "vsCodeLmModels"
| "vsCodeLmApiAvailable"
| "requestVsCodeLmModels"
| "theme"
| "workspaceUpdated"
| "invoke"
| "partialMessage"
| "glamaModels"
| "openRouterModels"
| "openAiModels"
| "mcpServers"
| "enhancedPrompt"
| "commitSearchResults"
text?: string
action?:
| "chatButtonClicked"
@@ -38,9 +43,11 @@ type:
vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
filePaths?: string[]
partialMessage?: ClineMessage
glamaModels?: Record<string, ModelInfo>
openRouterModels?: Record<string, ModelInfo>
openAiModels?: string[]
mcpServers?: McpServer[]
commits?: GitCommit[]
}
export interface ExtensionState {


@@ -51,6 +51,9 @@ export interface WebviewMessage {
| "deleteMessage"
| "terminalOutputLineLimit"
| "mcpEnabled"
| "refreshGlamaModels"
| "searchCommits"
text?: string
disabled?: boolean
askResponse?: ClineAskResponse
@@ -65,6 +68,7 @@ export interface WebviewMessage {
alwaysAllow?: boolean
dataUrls?: string[]
values?: Record<string, any>
query?: string
}
export type ClineAskResponse = "yesButtonClicked" | "noButtonClicked" | "messageResponse"


@@ -2,6 +2,7 @@ import * as vscode from 'vscode';
export type ApiProvider =
| "anthropic"
| "glama"
| "openrouter"
| "bedrock"
| "vertex"
@@ -18,6 +19,9 @@ export interface ApiHandlerOptions {
apiKey?: string // anthropic
anthropicBaseUrl?: string
vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
glamaModelId?: string
glamaModelInfo?: ModelInfo
glamaApiKey?: string
openRouterApiKey?: string
openRouterModelId?: string
openRouterModelInfo?: ModelInfo
@@ -41,7 +45,7 @@ export interface ApiHandlerOptions {
openAiNativeApiKey?: string
azureApiVersion?: string
openRouterUseMiddleOutTransform?: boolean
includeStreamOptions?: boolean
openAiStreamingEnabled?: boolean
setAzureApiVersion?: boolean
deepSeekBaseUrl?: string
deepSeekApiKey?: string
@@ -314,6 +318,23 @@ export const bedrockModels = {
},
} as const satisfies Record<string, ModelInfo>
// Glama
// https://glama.ai/models
export const glamaDefaultModelId = "anthropic/claude-3-5-sonnet"
export const glamaDefaultModelInfo: ModelInfo = {
maxTokens: 8192,
contextWindow: 200_000,
supportsImages: true,
supportsComputerUse: true,
supportsPromptCache: true,
inputPrice: 3.0,
outputPrice: 15.0,
cacheWritesPrice: 3.75,
cacheReadsPrice: 0.3,
description:
"The new Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: New Sonnet scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal\n\n_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/anthropic/claude-3.5-sonnet) variant._",
}
// OpenRouter
// https://openrouter.ai/models?order=newest&supported_parameters=tools
export const openRouterDefaultModelId = "anthropic/claude-3.5-sonnet:beta" // will always exist in openRouterModels


@@ -7,42 +7,79 @@ Mention regex:
- **Regex Breakdown**:
- `/@`:
- **@**: The mention must start with the '@' symbol.
- **@**: The mention must start with the '@' symbol.
- `((?:\/|\w+:\/\/)[^\s]+?|problems\b)`:
- **Capturing Group (`(...)`)**: Captures the part of the string that matches one of the specified patterns.
- `(?:\/|\w+:\/\/)`:
- **Non-Capturing Group (`(?:...)`)**: Groups the alternatives without capturing them for back-referencing.
- `\/`:
- **Slash (`/`)**: Indicates that the mention is a file or folder path starting with a '/'.
- `|`: Logical OR.
- `\w+:\/\/`:
- **Protocol (`\w+://`)**: Matches URLs that start with a word character sequence followed by '://', such as 'http://', 'https://', 'ftp://', etc.
- `[^\s]+?`:
- **Non-Whitespace Characters (`[^\s]+`)**: Matches one or more characters that are not whitespace.
- **Non-Greedy (`+?`)**: Ensures the smallest possible match, preventing the inclusion of trailing punctuation.
- `|`: Logical OR.
- `problems\b`:
- **Exact Word ('problems')**: Matches the exact word 'problems'.
- **Word Boundary (`\b`)**: Ensures that 'problems' is matched as a whole word and not as part of another word (e.g., 'problematic').
- `((?:\/|\w+:\/\/)[^\s]+?|problems\b|git-changes\b)`:
- **Capturing Group (`(...)`)**: Captures the part of the string that matches one of the specified patterns.
- `(?:\/|\w+:\/\/)`:
- **Non-Capturing Group (`(?:...)`)**: Groups the alternatives without capturing them for back-referencing.
- `\/`:
- **Slash (`/`)**: Indicates that the mention is a file or folder path starting with a '/'.
- `|`: Logical OR.
- `\w+:\/\/`:
- **Protocol (`\w+://`)**: Matches URLs that start with a word character sequence followed by '://', such as 'http://', 'https://', 'ftp://', etc.
- `[^\s]+?`:
- **Non-Whitespace Characters (`[^\s]+`)**: Matches one or more characters that are not whitespace.
- **Non-Greedy (`+?`)**: Ensures the smallest possible match, preventing the inclusion of trailing punctuation.
- `|`: Logical OR.
- `problems\b`:
- **Exact Word ('problems')**: Matches the exact word 'problems'.
- **Word Boundary (`\b`)**: Ensures that 'problems' is matched as a whole word and not as part of another word (e.g., 'problematic').
- `|`: Logical OR.
- `git-changes\b`:
- **Exact Word ('git-changes')**: Matches the exact word 'git-changes'.
- **Word Boundary (`\b`)**: Ensures that 'git-changes' is matched as a whole word and not as part of another word.
- `(?=[.,;:!?]?(?=[\s\r\n]|$))`:
- **Positive Lookahead (`(?=...)`)**: Ensures that the match is followed by specific patterns without including them in the match.
- `[.,;:!?]?`:
- **Optional Punctuation (`[.,;:!?]?`)**: Matches zero or one of the specified punctuation marks.
- `(?=[\s\r\n]|$)`:
- **Nested Positive Lookahead (`(?=[\s\r\n]|$)`)**: Ensures that the punctuation (if present) is followed by a whitespace character, a line break, or the end of the string.
- **Positive Lookahead (`(?=...)`)**: Ensures that the match is followed by specific patterns without including them in the match.
- `[.,;:!?]?`:
- **Optional Punctuation (`[.,;:!?]?`)**: Matches zero or one of the specified punctuation marks.
- `(?=[\s\r\n]|$)`:
- **Nested Positive Lookahead (`(?=[\s\r\n]|$)`)**: Ensures that the punctuation (if present) is followed by a whitespace character, a line break, or the end of the string.
- **Summary**:
- The regex effectively matches:
- Mentions that are file or folder paths starting with '/' and containing any non-whitespace characters (including periods within the path).
- URLs that start with a protocol (like 'http://') followed by any non-whitespace characters (including query parameters).
- The exact word 'problems'.
- Mentions that are file or folder paths starting with '/' and containing any non-whitespace characters (including periods within the path).
- URLs that start with a protocol (like 'http://') followed by any non-whitespace characters (including query parameters).
- The exact word 'problems'.
- The exact word 'git-changes'.
- Git commit hashes of 7 to 40 lowercase hexadecimal characters (`[a-f0-9]{7,40}\b`).
- It ensures that any trailing punctuation marks (such as ',', '.', '!', etc.) are not included in the matched mention, allowing the punctuation to follow the mention naturally in the text.
- **Global Regex**:
- `mentionRegexGlobal`: Creates a global version of the `mentionRegex` to find all matches within a given string.
*/
export const mentionRegex = /@((?:\/|\w+:\/\/)[^\s]+?|problems\b)(?=[.,;:!?]?(?=[\s\r\n]|$))/
export const mentionRegex = /@((?:\/|\w+:\/\/)[^\s]+?|[a-f0-9]{7,40}\b|problems\b|git-changes\b)(?=[.,;:!?]?(?=[\s\r\n]|$))/
export const mentionRegexGlobal = new RegExp(mentionRegex.source, "g")
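// Examples the updated pattern accepts or rejects (derived directly from the regex above):
// mentionRegex.test("@/src/app.ts")    // true: path starting with '/'
// mentionRegex.test("@https://a.dev")  // true: protocol URL
// mentionRegex.test("@abc1234")        // true: 7-40 character hex commit hash
// mentionRegex.test("@git-changes")    // true: exact keyword
// mentionRegex.test("@problematic")    // false: 'problems' requires a word boundary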
export interface MentionSuggestion {
type: 'file' | 'folder' | 'git' | 'problems'
label: string
description?: string
value: string
icon?: string
}
export interface GitMentionSuggestion extends MentionSuggestion {
type: 'git'
hash: string
shortHash: string
subject: string
author: string
date: string
}
export function formatGitSuggestion(commit: { hash: string; shortHash: string; subject: string; author: string; date: string }): GitMentionSuggestion {
return {
type: 'git',
label: commit.subject,
description: `${commit.shortHash} by ${commit.author} on ${commit.date}`,
value: commit.hash,
icon: '$(git-commit)', // VSCode git commit icon
hash: commit.hash,
shortHash: commit.shortHash,
subject: commit.subject,
author: commit.author,
date: commit.date
}
}
}
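// Example with hypothetical commit data:
// formatGitSuggestion({ hash: "abc1234def...", shortHash: "abc1234", subject: "Fix parser", author: "Jane", date: "2025-01-05" })
// -> { type: 'git', label: 'Fix parser', description: 'abc1234 by Jane on 2025-01-05', value: 'abc1234def...', icon: '$(git-commit)', ... }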

src/utils/git.ts (new file, 166 lines)

@@ -0,0 +1,166 @@
import { exec } from "child_process"
import { promisify } from "util"
import { truncateOutput } from "../integrations/misc/extract-text"
const execAsync = promisify(exec)
const GIT_OUTPUT_LINE_LIMIT = 500
export interface GitCommit {
hash: string
shortHash: string
subject: string
author: string
date: string
}
async function checkGitRepo(cwd: string): Promise<boolean> {
try {
await execAsync('git rev-parse --git-dir', { cwd })
return true
} catch (error) {
return false
}
}
async function checkGitInstalled(): Promise<boolean> {
try {
await execAsync('git --version')
return true
} catch (error) {
return false
}
}
export async function searchCommits(query: string, cwd: string): Promise<GitCommit[]> {
try {
const isInstalled = await checkGitInstalled()
if (!isInstalled) {
console.error("Git is not installed")
return []
}
const isRepo = await checkGitRepo(cwd)
if (!isRepo) {
console.error("Not a git repository")
return []
}
// Search commits by hash or message, limiting to 10 results
const { stdout } = await execAsync(
`git log -n 10 --format="%H%n%h%n%s%n%an%n%ad" --date=short ` +
`--grep="${query}" --regexp-ignore-case`,
{ cwd }
)
let output = stdout
if (!output.trim() && /^[a-f0-9]+$/i.test(query)) {
// If no results from grep search and query looks like a hash, try searching by hash
const { stdout: hashStdout } = await execAsync(
`git log -n 10 --format="%H%n%h%n%s%n%an%n%ad" --date=short ` +
`--author-date-order ${query}`,
{ cwd }
).catch(() => ({ stdout: "" }))
if (!hashStdout.trim()) {
return []
}
output = hashStdout
}
const commits: GitCommit[] = []
const lines = output.trim().split("\n").filter(line => line !== "--")
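// The --format string above emits exactly five lines per commit, in order: %H, %h, %s, %an, %ad.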
for (let i = 0; i < lines.length; i += 5) {
commits.push({
hash: lines[i],
shortHash: lines[i + 1],
subject: lines[i + 2],
author: lines[i + 3],
date: lines[i + 4]
})
}
return commits
} catch (error) {
console.error("Error searching commits:", error)
return []
}
}
export async function getCommitInfo(hash: string, cwd: string): Promise<string> {
try {
const isInstalled = await checkGitInstalled()
if (!isInstalled) {
return "Git is not installed"
}
const isRepo = await checkGitRepo(cwd)
if (!isRepo) {
return "Not a git repository"
}
// Get commit info, stats, and diff separately
const { stdout: info } = await execAsync(
`git show --format="%H%n%h%n%s%n%an%n%ad%n%b" --no-patch ${hash}`,
{ cwd }
)
// %b (the commit body) may span multiple lines, so collect the remainder rather than only the first line
const [fullHash, shortHash, subject, author, date, ...bodyLines] = info.trim().split('\n')
const body = bodyLines.join('\n').trim()
const { stdout: stats } = await execAsync(
`git show --stat --format="" ${hash}`,
{ cwd }
)
const { stdout: diff } = await execAsync(
`git show --format="" ${hash}`,
{ cwd }
)
const summary = [
`Commit: ${shortHash} (${fullHash})`,
`Author: ${author}`,
`Date: ${date}`,
`\nMessage: ${subject}`,
body ? `\nDescription:\n${body}` : '',
'\nFiles Changed:',
stats.trim(),
'\nFull Changes:'
].join('\n')
const output = summary + '\n\n' + diff.trim()
return truncateOutput(output, GIT_OUTPUT_LINE_LIMIT)
} catch (error) {
console.error("Error getting commit info:", error)
return `Failed to get commit info: ${error instanceof Error ? error.message : String(error)}`
}
}
export async function getWorkingState(cwd: string): Promise<string> {
try {
const isInstalled = await checkGitInstalled()
if (!isInstalled) {
return "Git is not installed"
}
const isRepo = await checkGitRepo(cwd)
if (!isRepo) {
return "Not a git repository"
}
// Get status of working directory
const { stdout: status } = await execAsync('git status --short', { cwd })
if (!status.trim()) {
return "No changes in working directory"
}
// Get all changes (both staged and unstaged) compared to HEAD
const { stdout: diff } = await execAsync('git diff HEAD', { cwd })
const lineLimit = GIT_OUTPUT_LINE_LIMIT
const output = `Working directory changes:\n\n${status}\n\n${diff}`.trim()
return truncateOutput(output, lineLimit)
} catch (error) {
console.error("Error getting working state:", error)
return `Failed to get working state: ${error instanceof Error ? error.message : String(error)}`
}
}
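// Hypothetical stand-in (an assumption, not the repo's implementation) for the
// truncateOutput helper imported above from ../integrations/misc/extract-text.
// Something with this shape -- trimming output to a line budget -- is all the
// functions in this file rely on.
function truncateByLines(output: string, lineLimit: number): string {
    const lines = output.split("\n")
    if (lines.length <= lineLimit) {
        return output
    }
    return lines.slice(0, lineLimit).join("\n") + `\n[... ${lines.length - lineLimit} lines truncated ...]`
}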

View File

@@ -12,8 +12,8 @@ import {
import { MAX_IMAGES_PER_MESSAGE } from "./ChatView"
import ContextMenu from "./ContextMenu"
import Thumbnails from "../common/Thumbnails"
import { vscode } from "../../utils/vscode"
import { WebviewMessage } from "../../../../src/shared/WebviewMessage"
interface ChatTextAreaProps {
inputValue: string
@@ -46,6 +46,7 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
) => {
const { filePaths, apiConfiguration } = useExtensionState()
const [isTextAreaFocused, setIsTextAreaFocused] = useState(false)
const [gitCommits, setGitCommits] = useState<any[]>([])
// Handle enhanced prompt response
useEffect(() => {
@@ -54,6 +55,15 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
if (message.type === 'enhancedPrompt' && message.text) {
setInputValue(message.text)
setIsEnhancingPrompt(false)
} else if (message.type === 'commitSearchResults') {
const commits = message.commits.map((commit: any) => ({
type: ContextMenuOptionType.Git,
value: commit.hash,
label: commit.subject,
description: `${commit.shortHash} by ${commit.author} on ${commit.date}`,
icon: "$(git-commit)"
}))
setGitCommits(commits)
}
}
window.addEventListener('message', messageHandler)
@@ -73,29 +83,40 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
const [justDeletedSpaceAfterMention, setJustDeletedSpaceAfterMention] = useState(false)
const [intendedCursorPosition, setIntendedCursorPosition] = useState<number | null>(null)
const contextMenuContainerRef = useRef<HTMLDivElement>(null)
const [isEnhancingPrompt, setIsEnhancingPrompt] = useState(false)
// Fetch git commits when Git is selected or when typing a hash
useEffect(() => {
if (selectedType === ContextMenuOptionType.Git || /^[a-f0-9]+$/i.test(searchQuery)) {
const message: WebviewMessage = {
type: "searchCommits",
query: searchQuery || ""
} as const
vscode.postMessage(message)
}
}, [selectedType, searchQuery])
const handleEnhancePrompt = useCallback(() => {
if (!textAreaDisabled) {
const trimmedInput = inputValue.trim()
if (trimmedInput) {
setIsEnhancingPrompt(true)
const message = {
type: "enhancePrompt" as const,
text: trimmedInput,
}
vscode.postMessage(message)
} else {
const promptDescription = "The 'Enhance Prompt' button helps improve your prompt by providing additional context, clarification, or rephrasing. Try typing a prompt in here and clicking the button again to see how it works."
setInputValue(promptDescription)
}
}
}, [inputValue, textAreaDisabled, setInputValue])
const queryItems = useMemo(() => {
return [
{ type: ContextMenuOptionType.Problems, value: "problems" },
...gitCommits,
...filePaths
.map((file) => "/" + file)
.map((path) => ({
@@ -103,7 +124,7 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
value: path,
})),
]
}, [filePaths])
}, [filePaths, gitCommits])
useEffect(() => {
const handleClickOutside = (event: MouseEvent) => {
@@ -130,7 +151,9 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
return
}
if (type === ContextMenuOptionType.File || type === ContextMenuOptionType.Folder) {
if (type === ContextMenuOptionType.File ||
type === ContextMenuOptionType.Folder ||
type === ContextMenuOptionType.Git) {
if (!value) {
setSelectedType(type)
setSearchQuery("")
@@ -149,6 +172,8 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
insertValue = value || ""
} else if (type === ContextMenuOptionType.Problems) {
insertValue = "problems"
} else if (type === ContextMenuOptionType.Git) {
insertValue = value || ""
}
const { newValue, mentionIndex } = insertMention(
@@ -161,7 +186,6 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
const newCursorPosition = newValue.indexOf(" ", mentionIndex + insertValue.length) + 1
setCursorPosition(newCursorPosition)
setIntendedCursorPosition(newCursorPosition)
// textAreaRef.current.focus()
// scroll to cursor
setTimeout(() => {
@@ -179,7 +203,6 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
(event: React.KeyboardEvent<HTMLTextAreaElement>) => {
if (showContextMenu) {
if (event.key === "Escape") {
// event.preventDefault()
setSelectedType(null)
setSelectedMenuIndex(3) // File by default
return
@@ -356,19 +379,17 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
setShowContextMenu(false)
// Scroll to new cursor position
// https://stackoverflow.com/questions/29899364/how-do-you-scroll-to-the-position-of-the-cursor-in-a-textarea/40951875#40951875
setTimeout(() => {
if (textAreaRef.current) {
textAreaRef.current.blur()
textAreaRef.current.focus()
}
}, 0)
// NOTE: callbacks don't get a cleanup return function here, but that's fine: the timeout fires immediately and the browser cleans it up (there's no chance the component unmounts before it executes)
return
}
const acceptedTypes = ["png", "jpeg", "webp"] // supported by anthropic and openrouter (jpg is just a file extension but the image will be recognized as jpeg)
const acceptedTypes = ["png", "jpeg", "webp"]
const imageItems = Array.from(items).filter((item) => {
const [type, subtype] = item.type.split("/")
return type === "image" && acceptedTypes.includes(subtype)
@@ -397,7 +418,6 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
})
const imageDataArray = await Promise.all(imagePromises)
const dataUrls = imageDataArray.filter((dataUrl): dataUrl is string => dataUrl !== null)
//.map((dataUrl) => dataUrl.split(",")[1]) // strip the mime type prefix, sharp doesn't need it
if (dataUrls.length > 0) {
setSelectedImages((prevImages) => [...prevImages, ...dataUrls].slice(0, MAX_IMAGES_PER_MESSAGE))
} else {
@@ -602,7 +622,6 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
boxSizing: "border-box",
backgroundColor: "transparent",
color: "var(--vscode-input-foreground)",
//border: "1px solid var(--vscode-input-border)",
borderRadius: 2,
fontFamily: "var(--vscode-font-family)",
fontSize: "var(--vscode-editor-font-size)",
@@ -610,18 +629,12 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
resize: "none",
overflowX: "hidden",
overflowY: "scroll",
// Since we have maxRows, when text is long enough it starts to overflow the bottom padding, appearing behind the thumbnails. To fix this, we use a transparent border to push the text up instead. (https://stackoverflow.com/questions/42631947/maintaining-a-padding-inside-of-text-area/52538410#52538410)
// borderTop: "9px solid transparent",
borderLeft: 0,
borderRight: 0,
borderTop: 0,
borderBottom: `${thumbnailsHeight + 6}px solid transparent`,
borderColor: "transparent",
padding: "9px 9px 25px 9px",
// borderRight: "54px solid transparent",
// borderLeft: "9px solid transparent", // NOTE: react-textarea-autosize doesn't calculate correct height when using borderLeft/borderRight so we need to use horizontal padding instead
// Instead of using boxShadow, we use a div with a border to better replicate the behavior when the textarea is focused
// boxShadow: "0px 0px 0px 1px var(--vscode-input-border)",
cursor: textAreaDisabled ? "not-allowed" : undefined,
flex: 1,
zIndex: 1,
@@ -645,21 +658,21 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
)}
<div className="button-row" style={{ position: "absolute", right: 20, display: "flex", alignItems: "center", height: 31, bottom: 8, zIndex: 2, justifyContent: "flex-end" }}>
<span style={{ display: "flex", alignItems: "center", gap: 12 }}>
{apiConfiguration?.apiProvider === "openrouter" && (
<div style={{ display: "flex", alignItems: "center" }}>
{isEnhancingPrompt && <span style={{ marginRight: 10, color: "var(--vscode-input-foreground)", opacity: 0.5 }}>Enhancing prompt...</span>}
<span
role="button"
aria-label="enhance prompt"
data-testid="enhance-prompt-button"
className={`input-icon-button ${textAreaDisabled ? "disabled" : ""} codicon codicon-sparkle`}
onClick={() => !textAreaDisabled && handleEnhancePrompt()}
style={{ fontSize: 16.5 }}
/>
</div>
)}
<span className={`input-icon-button ${shouldDisableImages ? "disabled" : ""} codicon codicon-device-camera`} onClick={() => !shouldDisableImages && onSelectImages()} style={{ fontSize: 16.5 }} />
<span className={`input-icon-button ${textAreaDisabled ? "disabled" : ""} codicon codicon-send`} onClick={() => !textAreaDisabled && onSend()} style={{ fontSize: 15 }} />
</span>
</div>
</div>
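// Illustration (not part of the diff; type names here are illustrative): the
// message shapes behind the commit-search effect above. The webview posts the
// request; the extension answers with the results, which the handler maps
// into ContextMenuOptionType.Git menu items.
type SearchCommitsRequest = { type: "searchCommits"; query: string }
type CommitSearchResults = {
    type: "commitSearchResults"
    commits: { hash: string; shortHash: string; subject: string; author: string; date: string }[]
}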

View File

@@ -52,6 +52,26 @@ const ContextMenu: React.FC<ContextMenuProps> = ({
return <span>Paste URL to fetch contents</span>
case ContextMenuOptionType.NoResults:
return <span>No results found</span>
case ContextMenuOptionType.Git:
if (option.value) {
return (
<div style={{ display: 'flex', flexDirection: 'column', gap: 0 }}>
<span style={{ lineHeight: '1.2' }}>{option.label}</span>
<span style={{
fontSize: '0.85em',
opacity: 0.7,
whiteSpace: 'nowrap',
overflow: 'hidden',
textOverflow: 'ellipsis',
lineHeight: '1.2'
}}>
{option.description}
</span>
</div>
)
} else {
return <span>Git Commits</span>
}
case ContextMenuOptionType.File:
case ContextMenuOptionType.Folder:
if (option.value) {
@@ -87,6 +107,8 @@ const ContextMenu: React.FC<ContextMenuProps> = ({
return "warning"
case ContextMenuOptionType.URL:
return "link"
case ContextMenuOptionType.Git:
return "git-commit"
case ContextMenuOptionType.NoResults:
return "info"
default:
@@ -121,7 +143,6 @@ const ContextMenu: React.FC<ContextMenuProps> = ({
maxHeight: "200px",
overflowY: "auto",
}}>
{/* Can't use virtuoso since it requires fixed height and menu height is dynamic based on # of items */}
{filteredOptions.map((option, index) => (
<div
key={`${option.type}-${option.value || index}`}
@@ -147,24 +168,33 @@ const ContextMenu: React.FC<ContextMenuProps> = ({
flex: 1,
minWidth: 0,
overflow: "hidden",
paddingTop: 0
}}>
<i
className={`codicon codicon-${getIconForOption(option)}`}
style={{ marginRight: "8px", flexShrink: 0, fontSize: "14px" }}
style={{
marginRight: "6px",
flexShrink: 0,
fontSize: "14px",
marginTop: 0
}}
/>
{renderOptionContent(option)}
</div>
{(option.type === ContextMenuOptionType.File || option.type === ContextMenuOptionType.Folder) &&
!option.value && (
<i
className="codicon codicon-chevron-right"
style={{ fontSize: "14px", flexShrink: 0, marginLeft: 8 }}
/>
)}
{((option.type === ContextMenuOptionType.File ||
option.type === ContextMenuOptionType.Folder ||
option.type === ContextMenuOptionType.Git) &&
!option.value) && (
<i
className="codicon codicon-chevron-right"
style={{ fontSize: "14px", flexShrink: 0, marginLeft: 8 }}
/>
)}
{(option.type === ContextMenuOptionType.Problems ||
((option.type === ContextMenuOptionType.File ||
option.type === ContextMenuOptionType.Folder) &&
option.value)) && (
((option.type === ContextMenuOptionType.File ||
option.type === ContextMenuOptionType.Folder ||
option.type === ContextMenuOptionType.Git) &&
option.value)) && (
<i
className="codicon codicon-add"
style={{ fontSize: "14px", flexShrink: 0, marginLeft: 8 }}

View File

@@ -21,6 +21,8 @@ import {
deepSeekModels,
geminiDefaultModelId,
geminiModels,
glamaDefaultModelId,
glamaDefaultModelInfo,
openAiModelInfoSaneDefaults,
openAiNativeDefaultModelId,
openAiNativeModels,
@@ -38,6 +40,7 @@ import OpenRouterModelPicker, {
OPENROUTER_MODEL_PICKER_Z_INDEX,
} from "./OpenRouterModelPicker"
import OpenAiModelPicker from "./OpenAiModelPicker"
import GlamaModelPicker from "./GlamaModelPicker"
interface ApiOptionsProps {
showModelOptions: boolean
@@ -141,6 +144,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
<VSCodeOption value="openai">OpenAI Compatible</VSCodeOption>
<VSCodeOption value="vertex">GCP Vertex AI</VSCodeOption>
<VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption>
<VSCodeOption value="glama">Glama</VSCodeOption>
<VSCodeOption value="lmstudio">LM Studio</VSCodeOption>
<VSCodeOption value="ollama">Ollama</VSCodeOption>
<VSCodeOption value="vscode-lm">VS Code LM API</VSCodeOption>
@@ -198,6 +202,34 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
</div>
)}
{selectedProvider === "glama" && (
<div>
<VSCodeTextField
value={apiConfiguration?.glamaApiKey || ""}
style={{ width: "100%" }}
type="password"
onInput={handleInputChange("glamaApiKey")}
placeholder="Enter API Key...">
<span style={{ fontWeight: 500 }}>Glama API Key</span>
</VSCodeTextField>
{!apiConfiguration?.glamaApiKey && (
<VSCodeLink
href="https://glama.ai/settings/api-keys"
style={{ display: "inline", fontSize: "inherit" }}>
You can get a Glama API key by signing up here.
</VSCodeLink>
)}
<p
style={{
fontSize: "12px",
marginTop: "5px",
color: "var(--vscode-descriptionForeground)",
}}>
This key is stored locally and only used to make API requests from this extension.
</p>
</div>
)}
{selectedProvider === "openai-native" && (
<div>
<VSCodeTextField
@@ -450,21 +482,16 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
<OpenAiModelPicker />
<div style={{ display: 'flex', alignItems: 'center' }}>
<VSCodeCheckbox
checked={apiConfiguration?.includeStreamOptions ?? true}
checked={apiConfiguration?.openAiStreamingEnabled ?? true}
onChange={(e: any) => {
const isChecked = e.target.checked
setApiConfiguration({
...apiConfiguration,
includeStreamOptions: isChecked
openAiStreamingEnabled: isChecked
})
}}>
Include stream options
Enable streaming
</VSCodeCheckbox>
<span
className="codicon codicon-info"
title="Stream options are for { include_usage: true }. Some providers may not support this option."
style={{ marginLeft: '5px', cursor: 'help' }}
></span>
</div>
<VSCodeCheckbox
checked={azureApiVersionSelected}
@@ -715,9 +742,12 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
</p>
)}
{selectedProvider === "glama" && showModelOptions && <GlamaModelPicker />}
{selectedProvider === "openrouter" && showModelOptions && <OpenRouterModelPicker />}
{selectedProvider !== "openrouter" &&
{selectedProvider !== "glama" &&
selectedProvider !== "openrouter" &&
selectedProvider !== "openai" &&
selectedProvider !== "ollama" &&
selectedProvider !== "lmstudio" &&
@@ -921,6 +951,12 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
return getProviderData(deepSeekModels, deepSeekDefaultModelId)
case "openai-native":
return getProviderData(openAiNativeModels, openAiNativeDefaultModelId)
case "glama":
return {
selectedProvider: provider,
selectedModelId: apiConfiguration?.glamaModelId || glamaDefaultModelId,
selectedModelInfo: apiConfiguration?.glamaModelInfo || glamaDefaultModelInfo,
}
case "openrouter":
return {
selectedProvider: provider,

View File

@@ -0,0 +1,396 @@
import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
import Fuse from "fuse.js"
import React, { KeyboardEvent, memo, useEffect, useMemo, useRef, useState } from "react"
import { useRemark } from "react-remark"
import { useMount } from "react-use"
import styled from "styled-components"
import { glamaDefaultModelId } from "../../../../src/shared/api"
import { useExtensionState } from "../../context/ExtensionStateContext"
import { vscode } from "../../utils/vscode"
import { highlight } from "../history/HistoryView"
import { ModelInfoView, normalizeApiConfiguration } from "./ApiOptions"
const GlamaModelPicker: React.FC = () => {
const { apiConfiguration, setApiConfiguration, glamaModels } = useExtensionState()
const [searchTerm, setSearchTerm] = useState(apiConfiguration?.glamaModelId || glamaDefaultModelId)
const [isDropdownVisible, setIsDropdownVisible] = useState(false)
const [selectedIndex, setSelectedIndex] = useState(-1)
const dropdownRef = useRef<HTMLDivElement>(null)
const itemRefs = useRef<(HTMLDivElement | null)[]>([])
const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
const dropdownListRef = useRef<HTMLDivElement>(null)
const handleModelChange = (newModelId: string) => {
// could be setting invalid model id/undefined info but validation will catch it
setApiConfiguration({
...apiConfiguration,
glamaModelId: newModelId,
glamaModelInfo: glamaModels[newModelId],
})
setSearchTerm(newModelId)
}
const { selectedModelId, selectedModelInfo } = useMemo(() => {
return normalizeApiConfiguration(apiConfiguration)
}, [apiConfiguration])
useMount(() => {
vscode.postMessage({ type: "refreshGlamaModels" })
})
useEffect(() => {
const handleClickOutside = (event: MouseEvent) => {
if (dropdownRef.current && !dropdownRef.current.contains(event.target as Node)) {
setIsDropdownVisible(false)
}
}
document.addEventListener("mousedown", handleClickOutside)
return () => {
document.removeEventListener("mousedown", handleClickOutside)
}
}, [])
const modelIds = useMemo(() => {
return Object.keys(glamaModels).sort((a, b) => a.localeCompare(b))
}, [glamaModels])
const searchableItems = useMemo(() => {
return modelIds.map((id) => ({
id,
html: id,
}))
}, [modelIds])
const fuse = useMemo(() => {
return new Fuse(searchableItems, {
keys: ["html"], // highlight function will update this
threshold: 0.6,
shouldSort: true,
isCaseSensitive: false,
ignoreLocation: false,
includeMatches: true,
minMatchCharLength: 1,
})
}, [searchableItems])
const modelSearchResults = useMemo(() => {
let results: { id: string; html: string }[] = searchTerm
? highlight(fuse.search(searchTerm), "model-item-highlight")
: searchableItems
// results.sort((a, b) => a.id.localeCompare(b.id)) NOTE: sorting like this causes ids in objects to be reordered and mismatched
return results
}, [searchableItems, searchTerm, fuse])
const handleKeyDown = (event: KeyboardEvent<HTMLInputElement>) => {
if (!isDropdownVisible) return
switch (event.key) {
case "ArrowDown":
event.preventDefault()
setSelectedIndex((prev) => (prev < modelSearchResults.length - 1 ? prev + 1 : prev))
break
case "ArrowUp":
event.preventDefault()
setSelectedIndex((prev) => (prev > 0 ? prev - 1 : prev))
break
case "Enter":
event.preventDefault()
if (selectedIndex >= 0 && selectedIndex < modelSearchResults.length) {
handleModelChange(modelSearchResults[selectedIndex].id)
setIsDropdownVisible(false)
}
break
case "Escape":
setIsDropdownVisible(false)
setSelectedIndex(-1)
break
}
}
const hasInfo = useMemo(() => {
return modelIds.some((id) => id.toLowerCase() === searchTerm.toLowerCase())
}, [modelIds, searchTerm])
useEffect(() => {
setSelectedIndex(-1)
if (dropdownListRef.current) {
dropdownListRef.current.scrollTop = 0
}
}, [searchTerm])
useEffect(() => {
if (selectedIndex >= 0 && itemRefs.current[selectedIndex]) {
itemRefs.current[selectedIndex]?.scrollIntoView({
block: "nearest",
behavior: "smooth",
})
}
}, [selectedIndex])
return (
<>
<style>
{`
.model-item-highlight {
background-color: var(--vscode-editor-findMatchHighlightBackground);
color: inherit;
}
`}
</style>
<div>
<label htmlFor="model-search">
<span style={{ fontWeight: 500 }}>Model</span>
</label>
<DropdownWrapper ref={dropdownRef}>
<VSCodeTextField
id="model-search"
placeholder="Search and select a model..."
value={searchTerm}
onInput={(e) => {
handleModelChange((e.target as HTMLInputElement)?.value?.toLowerCase())
setIsDropdownVisible(true)
}}
onFocus={() => setIsDropdownVisible(true)}
onKeyDown={handleKeyDown}
style={{ width: "100%", zIndex: GLAMA_MODEL_PICKER_Z_INDEX, position: "relative" }}>
{searchTerm && (
<div
className="input-icon-button codicon codicon-close"
aria-label="Clear search"
onClick={() => {
handleModelChange("")
setIsDropdownVisible(true)
}}
slot="end"
style={{
display: "flex",
justifyContent: "center",
alignItems: "center",
height: "100%",
}}
/>
)}
</VSCodeTextField>
{isDropdownVisible && (
<DropdownList ref={dropdownListRef}>
{modelSearchResults.map((item, index) => (
<DropdownItem
key={item.id}
ref={(el) => (itemRefs.current[index] = el)}
isSelected={index === selectedIndex}
onMouseEnter={() => setSelectedIndex(index)}
onClick={() => {
handleModelChange(item.id)
setIsDropdownVisible(false)
}}
dangerouslySetInnerHTML={{
__html: item.html,
}}
/>
))}
</DropdownList>
)}
</DropdownWrapper>
</div>
{hasInfo ? (
<ModelInfoView
selectedModelId={selectedModelId}
modelInfo={selectedModelInfo}
isDescriptionExpanded={isDescriptionExpanded}
setIsDescriptionExpanded={setIsDescriptionExpanded}
/>
) : (
<p
style={{
fontSize: "12px",
marginTop: 0,
color: "var(--vscode-descriptionForeground)",
}}>
The extension automatically fetches the latest list of models available on{" "}
<VSCodeLink style={{ display: "inline", fontSize: "inherit" }} href="https://glama.ai/models">
Glama.
</VSCodeLink>
If you're unsure which model to choose, Cline works best with{" "}
<VSCodeLink
style={{ display: "inline", fontSize: "inherit" }}
onClick={() => handleModelChange("anthropic/claude-3.5-sonnet")}>
anthropic/claude-3.5-sonnet.
</VSCodeLink>
You can also search "free" to see the no-cost options currently available.
</p>
)}
</>
)
}
export default GlamaModelPicker
// Dropdown
const DropdownWrapper = styled.div`
position: relative;
width: 100%;
`
export const GLAMA_MODEL_PICKER_Z_INDEX = 1_000
const DropdownList = styled.div`
position: absolute;
top: calc(100% - 3px);
left: 0;
width: calc(100% - 2px);
max-height: 200px;
overflow-y: auto;
background-color: var(--vscode-dropdown-background);
border: 1px solid var(--vscode-list-activeSelectionBackground);
z-index: ${GLAMA_MODEL_PICKER_Z_INDEX - 1};
border-bottom-left-radius: 3px;
border-bottom-right-radius: 3px;
`
const DropdownItem = styled.div<{ isSelected: boolean }>`
padding: 5px 10px;
cursor: pointer;
word-break: break-all;
white-space: normal;
background-color: ${({ isSelected }) => (isSelected ? "var(--vscode-list-activeSelectionBackground)" : "inherit")};
&:hover {
background-color: var(--vscode-list-activeSelectionBackground);
}
`
// Markdown
const StyledMarkdown = styled.div`
font-family:
var(--vscode-font-family),
system-ui,
-apple-system,
BlinkMacSystemFont,
"Segoe UI",
Roboto,
Oxygen,
Ubuntu,
Cantarell,
"Open Sans",
"Helvetica Neue",
sans-serif;
font-size: 12px;
color: var(--vscode-descriptionForeground);
p,
li,
ol,
ul {
line-height: 1.25;
margin: 0;
}
ol,
ul {
padding-left: 1.5em;
margin-left: 0;
}
p {
white-space: pre-wrap;
}
a {
text-decoration: none;
}
a {
&:hover {
text-decoration: underline;
}
}
`
export const ModelDescriptionMarkdown = memo(
({
markdown,
key,
isExpanded,
setIsExpanded,
}: {
markdown?: string
key: string
isExpanded: boolean
setIsExpanded: (isExpanded: boolean) => void
}) => {
const [reactContent, setMarkdown] = useRemark()
const [showSeeMore, setShowSeeMore] = useState(false)
const textContainerRef = useRef<HTMLDivElement>(null)
const textRef = useRef<HTMLDivElement>(null)
useEffect(() => {
setMarkdown(markdown || "")
}, [markdown, setMarkdown])
useEffect(() => {
if (textRef.current && textContainerRef.current) {
const { scrollHeight } = textRef.current
const { clientHeight } = textContainerRef.current
const isOverflowing = scrollHeight > clientHeight
setShowSeeMore(isOverflowing)
}
}, [reactContent, setIsExpanded])
return (
<StyledMarkdown key={key} style={{ display: "inline-block", marginBottom: 0 }}>
<div
ref={textContainerRef}
style={{
overflowY: isExpanded ? "auto" : "hidden",
position: "relative",
wordBreak: "break-word",
overflowWrap: "anywhere",
}}>
<div
ref={textRef}
style={{
display: "-webkit-box",
WebkitLineClamp: isExpanded ? "unset" : 3,
WebkitBoxOrient: "vertical",
overflow: "hidden",
}}>
{reactContent}
</div>
{!isExpanded && showSeeMore && (
<div
style={{
position: "absolute",
right: 0,
bottom: 0,
display: "flex",
alignItems: "center",
}}>
<div
style={{
width: 30,
height: "1.2em",
background:
"linear-gradient(to right, transparent, var(--vscode-sideBar-background))",
}}
/>
<VSCodeLink
style={{
fontSize: "inherit",
paddingRight: 0,
paddingLeft: 3,
backgroundColor: "var(--vscode-sideBar-background)",
}}
onClick={() => setIsExpanded(true)}>
See more
</VSCodeLink>
</div>
)}
</div>
</StyledMarkdown>
)
},
)
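// Standalone illustration (not part of the diff) of the Fuse configuration
// used above, reduced to a plain search over model ids. threshold 0.6 is
// fairly permissive, so a partial term like "claude" still ranks matches first.
import Fuse from "fuse.js"
const demoItems = [
    { id: "anthropic/claude-3.5-sonnet", html: "anthropic/claude-3.5-sonnet" },
    { id: "openai/gpt-4o", html: "openai/gpt-4o" },
]
const demoFuse = new Fuse(demoItems, { keys: ["html"], threshold: 0.6, includeMatches: true })
const demoHits = demoFuse.search("claude").map((result) => result.item.id)
// -> ["anthropic/claude-3.5-sonnet"]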

View File

@@ -37,6 +37,7 @@ const SettingsView = ({ onDone }: SettingsViewProps) => {
browserViewportSize,
setBrowserViewportSize,
openRouterModels,
glamaModels,
setAllowedCommands,
allowedCommands,
fuzzyMatchThreshold,
@@ -56,7 +57,7 @@ const SettingsView = ({ onDone }: SettingsViewProps) => {
const [commandInput, setCommandInput] = useState("")
const handleSubmit = () => {
const apiValidationResult = validateApiConfiguration(apiConfiguration)
const modelIdValidationResult = validateModelId(apiConfiguration, openRouterModels)
const modelIdValidationResult = validateModelId(apiConfiguration, glamaModels, openRouterModels)
setApiErrorMessage(apiValidationResult)
setModelIdErrorMessage(modelIdValidationResult)
@@ -94,10 +95,10 @@ const SettingsView = ({ onDone }: SettingsViewProps) => {
// Initial validation on mount
useEffect(() => {
const apiValidationResult = validateApiConfiguration(apiConfiguration)
const modelIdValidationResult = validateModelId(apiConfiguration, openRouterModels)
const modelIdValidationResult = validateModelId(apiConfiguration, glamaModels, openRouterModels)
setApiErrorMessage(apiValidationResult)
setModelIdErrorMessage(modelIdValidationResult)
}, [apiConfiguration, openRouterModels])
}, [apiConfiguration, glamaModels, openRouterModels])
const handleResetState = () => {
vscode.postMessage({ type: "resetState" })

View File

@@ -4,6 +4,8 @@ import { ExtensionMessage, ExtensionState } from "../../../src/shared/ExtensionM
import {
ApiConfiguration,
ModelInfo,
glamaDefaultModelId,
glamaDefaultModelInfo,
openRouterDefaultModelId,
openRouterDefaultModelInfo,
} from "../../../src/shared/api"
@@ -16,6 +18,7 @@ export interface ExtensionStateContextType extends ExtensionState {
didHydrateState: boolean
showWelcome: boolean
theme: any
glamaModels: Record<string, ModelInfo>
openRouterModels: Record<string, ModelInfo>
openAiModels: string[],
mcpServers: McpServer[]
@@ -69,6 +72,9 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
const [showWelcome, setShowWelcome] = useState(false)
const [theme, setTheme] = useState<any>(undefined)
const [filePaths, setFilePaths] = useState<string[]>([])
const [glamaModels, setGlamaModels] = useState<Record<string, ModelInfo>>({
[glamaDefaultModelId]: glamaDefaultModelInfo,
})
const [openRouterModels, setOpenRouterModels] = useState<Record<string, ModelInfo>>({
[openRouterDefaultModelId]: openRouterDefaultModelInfo,
})
@@ -85,6 +91,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
const hasKey = config
? [
config.apiKey,
config.glamaApiKey,
config.openRouterApiKey,
config.awsRegion,
config.vertexProjectId,
@@ -93,6 +100,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
config.lmStudioModelId,
config.geminiApiKey,
config.openAiNativeApiKey,
config.deepSeekApiKey,
].some((key) => key !== undefined)
: false
setShowWelcome(!hasKey)
@@ -123,6 +131,14 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
})
break
}
case "glamaModels": {
const updatedModels = message.glamaModels ?? {}
setGlamaModels({
[glamaDefaultModelId]: glamaDefaultModelInfo, // in case the extension sent a model list without the default model
...updatedModels,
})
break
}
case "openRouterModels": {
const updatedModels = message.openRouterModels ?? {}
setOpenRouterModels({
@@ -154,6 +170,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
didHydrateState,
showWelcome,
theme,
glamaModels,
openRouterModels,
openAiModels,
mcpServers,

View File

@@ -0,0 +1,46 @@
import { vscode } from "../utils/vscode"
export interface GitCommit {
hash: string
shortHash: string
subject: string
author: string
date: string
}
class GitService {
private commits: GitCommit[] | null = null
private lastQuery: string = ''
async searchCommits(query: string = ''): Promise<GitCommit[]> {
if (query === this.lastQuery && this.commits) {
return this.commits
}
// Request search from extension
vscode.postMessage({ type: 'searchCommits', query })
// Wait for response
const response = await new Promise<GitCommit[]>((resolve) => {
const handler = (event: MessageEvent) => {
const message = event.data
if (message.type === 'commitSearchResults') {
window.removeEventListener('message', handler)
resolve(message.commits)
}
}
window.addEventListener('message', handler)
})
this.commits = response
this.lastQuery = query
return response
}
clearCache() {
this.commits = null
this.lastQuery = ''
}
}
export const gitService = new GitService()
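// Illustration (not part of the diff): typical use of the singleton above.
// Repeating the same query hits the cache and skips the webview round trip.
async function demoSearch() {
    const commits = await gitService.searchCommits("fix")
    console.log(commits.map((c) => `${c.shortHash} ${c.subject}`))
    gitService.clearCache() // drop the cache to force a fresh search next time
}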

View File

@@ -0,0 +1,130 @@
import { insertMention, removeMention, getContextMenuOptions, shouldShowContextMenu, ContextMenuOptionType, ContextMenuQueryItem } from '../context-mentions'
describe('insertMention', () => {
it('should insert mention at cursor position when no @ symbol exists', () => {
const result = insertMention('Hello world', 5, 'test')
expect(result.newValue).toBe('Hello@test world')
expect(result.mentionIndex).toBe(5)
})
it('should replace text after last @ symbol', () => {
const result = insertMention('Hello @wor world', 8, 'test')
expect(result.newValue).toBe('Hello @test world')
expect(result.mentionIndex).toBe(6)
})
it('should handle empty text', () => {
const result = insertMention('', 0, 'test')
expect(result.newValue).toBe('@test ')
expect(result.mentionIndex).toBe(0)
})
})
describe('removeMention', () => {
it('should remove mention when cursor is at end of mention', () => {
// Test with the problems keyword that matches the regex
const result = removeMention('Hello @problems ', 15)
expect(result.newText).toBe('Hello ')
expect(result.newPosition).toBe(6)
})
it('should not remove text when not at end of mention', () => {
const result = removeMention('Hello @test world', 8)
expect(result.newText).toBe('Hello @test world')
expect(result.newPosition).toBe(8)
})
it('should handle text without mentions', () => {
const result = removeMention('Hello world', 5)
expect(result.newText).toBe('Hello world')
expect(result.newPosition).toBe(5)
})
})
describe('getContextMenuOptions', () => {
const mockQueryItems: ContextMenuQueryItem[] = [
{
type: ContextMenuOptionType.File,
value: 'src/test.ts',
label: 'test.ts',
description: 'Source file'
},
{
type: ContextMenuOptionType.Git,
value: 'abc1234',
label: 'Initial commit',
description: 'First commit',
icon: '$(git-commit)'
},
{
type: ContextMenuOptionType.Folder,
value: 'src',
label: 'src',
description: 'Source folder'
}
]
it('should return all option types for empty query', () => {
const result = getContextMenuOptions('', null, [])
expect(result).toHaveLength(5)
expect(result.map(item => item.type)).toEqual([
ContextMenuOptionType.Problems,
ContextMenuOptionType.URL,
ContextMenuOptionType.Folder,
ContextMenuOptionType.File,
ContextMenuOptionType.Git
])
})
it('should filter by selected type when query is empty', () => {
const result = getContextMenuOptions('', ContextMenuOptionType.File, mockQueryItems)
expect(result).toHaveLength(1)
expect(result[0].type).toBe(ContextMenuOptionType.File)
expect(result[0].value).toBe('src/test.ts')
})
it('should match git commands', () => {
const result = getContextMenuOptions('git', null, mockQueryItems)
expect(result[0].type).toBe(ContextMenuOptionType.Git)
expect(result[0].label).toBe('Git Commits')
})
it('should match git commit hashes', () => {
const result = getContextMenuOptions('abc1234', null, mockQueryItems)
expect(result[0].type).toBe(ContextMenuOptionType.Git)
expect(result[0].value).toBe('abc1234')
})
it('should return NoResults when no matches found', () => {
const result = getContextMenuOptions('nonexistent', null, mockQueryItems)
expect(result).toHaveLength(1)
expect(result[0].type).toBe(ContextMenuOptionType.NoResults)
})
})
describe('shouldShowContextMenu', () => {
it('should return true for @ symbol', () => {
expect(shouldShowContextMenu('@', 1)).toBe(true)
})
it('should return true for @ followed by text', () => {
expect(shouldShowContextMenu('Hello @test', 10)).toBe(true)
})
it('should return false when no @ symbol exists', () => {
expect(shouldShowContextMenu('Hello world', 5)).toBe(false)
})
it('should return false for @ followed by whitespace', () => {
expect(shouldShowContextMenu('Hello @ world', 6)).toBe(false)
})
it('should return false for @ in URL', () => {
expect(shouldShowContextMenu('Hello @http://test.com', 17)).toBe(false)
})
it('should return false for @problems', () => {
// Position cursor at the end to test the full word
expect(shouldShowContextMenu('@problems', 9)).toBe(false)
})
})

View File

@@ -51,12 +51,16 @@ export enum ContextMenuOptionType {
Folder = "folder",
Problems = "problems",
URL = "url",
Git = "git",
NoResults = "noResults",
}
export interface ContextMenuQueryItem {
type: ContextMenuOptionType
value?: string
label?: string
description?: string
icon?: string
}
export function getContextMenuOptions(
@@ -64,6 +68,14 @@ export function getContextMenuOptions(
selectedType: ContextMenuOptionType | null = null,
queryItems: ContextMenuQueryItem[],
): ContextMenuQueryItem[] {
const workingChanges: ContextMenuQueryItem = {
type: ContextMenuOptionType.Git,
value: "git-changes",
label: "Working changes",
description: "Current uncommitted changes",
icon: "$(git-commit)"
}
if (query === "") {
if (selectedType === ContextMenuOptionType.File) {
const files = queryItems
@@ -79,30 +91,88 @@ export function getContextMenuOptions(
return folders.length > 0 ? folders : [{ type: ContextMenuOptionType.NoResults }]
}
if (selectedType === ContextMenuOptionType.Git) {
const commits = queryItems
.filter((item) => item.type === ContextMenuOptionType.Git)
return commits.length > 0 ? [workingChanges, ...commits] : [workingChanges]
}
return [
{ type: ContextMenuOptionType.URL },
{ type: ContextMenuOptionType.Problems },
{ type: ContextMenuOptionType.URL },
{ type: ContextMenuOptionType.Folder },
{ type: ContextMenuOptionType.File },
{ type: ContextMenuOptionType.Git },
]
}
const lowerQuery = query.toLowerCase()
const suggestions: ContextMenuQueryItem[] = []
// Check for top-level option matches
if ("git".startsWith(lowerQuery)) {
suggestions.push({
type: ContextMenuOptionType.Git,
label: "Git Commits",
description: "Search repository history",
icon: "$(git-commit)"
})
} else if ("git-changes".startsWith(lowerQuery)) {
suggestions.push(workingChanges)
}
if ("problems".startsWith(lowerQuery)) {
suggestions.push({ type: ContextMenuOptionType.Problems })
}
if (query.startsWith("http")) {
return [{ type: ContextMenuOptionType.URL, value: query }]
} else {
const matchingItems = queryItems.filter((item) => item.value?.toLowerCase().includes(lowerQuery))
suggestions.push({ type: ContextMenuOptionType.URL, value: query })
}
if (matchingItems.length > 0) {
return matchingItems.map((item) => ({
type: item.type,
value: item.value,
}))
// Add exact SHA matches to suggestions
if (/^[a-f0-9]{7,40}$/i.test(lowerQuery)) {
const exactMatches = queryItems.filter((item) =>
item.type === ContextMenuOptionType.Git &&
item.value?.toLowerCase() === lowerQuery
)
if (exactMatches.length > 0) {
suggestions.push(...exactMatches)
} else {
return [{ type: ContextMenuOptionType.NoResults }]
// If no exact match but valid SHA format, add as option
suggestions.push({
type: ContextMenuOptionType.Git,
value: lowerQuery,
label: `Commit ${lowerQuery}`,
description: "Git commit hash",
icon: "$(git-commit)"
})
}
}
// Get matching items, separating by type
const matchingItems = queryItems.filter((item) =>
item.value?.toLowerCase().includes(lowerQuery) ||
item.label?.toLowerCase().includes(lowerQuery) ||
item.description?.toLowerCase().includes(lowerQuery)
)
const fileMatches = matchingItems.filter(item =>
item.type === ContextMenuOptionType.File ||
item.type === ContextMenuOptionType.Folder
)
const gitMatches = matchingItems.filter(item =>
item.type === ContextMenuOptionType.Git
)
const otherMatches = matchingItems.filter(item =>
item.type !== ContextMenuOptionType.File &&
item.type !== ContextMenuOptionType.Folder &&
item.type !== ContextMenuOptionType.Git
)
// Combine suggestions with matching items in the desired order
if (suggestions.length > 0 || matchingItems.length > 0) {
return [...suggestions, ...fileMatches, ...gitMatches, ...otherMatches]
}
return [{ type: ContextMenuOptionType.NoResults }]
}
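// Illustration (not part of the diff): a hash-like query, i.e. one matching
// /^[a-f0-9]{7,40}$/i, surfaces the exact Git item first when one is loaded;
// otherwise a synthesized "Commit <sha>" option is offered in its place.
const demoItems: ContextMenuQueryItem[] = [
    { type: ContextMenuOptionType.Git, value: "abc1234", label: "Initial commit" },
]
const demoTop = getContextMenuOptions("abc1234", null, demoItems)[0]
// -> the exact "abc1234" Git match (mirrors the unit test in __tests__)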
export function shouldShowContextMenu(text: string, position: number): boolean {

View File

@@ -1,4 +1,4 @@
import { ApiConfiguration, openRouterDefaultModelId } from "../../../src/shared/api"
import { ApiConfiguration, glamaDefaultModelId, openRouterDefaultModelId } from "../../../src/shared/api"
import { ModelInfo } from "../../../src/shared/api"
export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): string | undefined {
if (apiConfiguration) {
@@ -8,6 +8,11 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s
return "You must provide a valid API key or choose a different provider."
}
break
case "glama":
if (!apiConfiguration.glamaApiKey) {
return "You must provide a valid API key or choose a different provider."
}
break
case "bedrock":
if (!apiConfiguration.awsRegion) {
return "You must choose a region to use with AWS Bedrock."
@@ -59,10 +64,21 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s
export function validateModelId(
apiConfiguration?: ApiConfiguration,
glamaModels?: Record<string, ModelInfo>,
openRouterModels?: Record<string, ModelInfo>,
): string | undefined {
if (apiConfiguration) {
switch (apiConfiguration.apiProvider) {
case "glama":
const glamaModelId = apiConfiguration.glamaModelId || glamaDefaultModelId // in case the user hasn't changed the model id, it will be undefined by default
if (!glamaModelId) {
return "You must provide a model ID."
}
if (glamaModels && !Object.keys(glamaModels).includes(glamaModelId)) {
// even if the model list endpoint failed, ExtensionStateContext will always have the default model info
return "The model ID you provided is not available. Please choose a different model."
}
break
case "openrouter":
const modelId = apiConfiguration.openRouterModelId || openRouterDefaultModelId // in case the user hasn't changed the model id, it will be undefined by default
if (!modelId) {