From e5e700ffcb6f19ef08ea7461c1af68418a93008b Mon Sep 17 00:00:00 2001 From: Frank Date: Sat, 4 Jan 2025 21:25:33 -0600 Subject: [PATCH 01/24] feat: add Glama gateway --- README.md | 2 +- package-lock.json | 2 +- package.json | 2 +- src/api/index.ts | 3 + src/api/providers/glama.ts | 134 ++++++ src/core/webview/ClineProvider.ts | 128 ++++++ src/shared/ExtensionMessage.ts | 2 + src/shared/WebviewMessage.ts | 1 + src/shared/api.ts | 21 + .../src/components/settings/ApiOptions.tsx | 43 +- .../components/settings/GlamaModelPicker.tsx | 396 ++++++++++++++++++ .../src/components/settings/SettingsView.tsx | 3 +- .../src/context/ExtensionStateContext.tsx | 16 + webview-ui/src/utils/validate.ts | 18 +- 14 files changed, 765 insertions(+), 6 deletions(-) create mode 100644 src/api/providers/glama.ts create mode 100644 webview-ui/src/components/settings/GlamaModelPicker.tsx diff --git a/README.md b/README.md index fba0aee..7c9471f 100644 --- a/README.md +++ b/README.md @@ -135,7 +135,7 @@ Thanks to [Claude 3.5 Sonnet's agentic coding capabilities](https://www-cdn.ant ### Use any API and Model -Cline supports API providers like OpenRouter, Anthropic, OpenAI, Google Gemini, AWS Bedrock, Azure, and GCP Vertex. You can also configure any OpenAI compatible API, or use a local model through LM Studio/Ollama. If you're using OpenRouter, the extension fetches their latest model list, allowing you to use the newest models as soon as they're available. +Cline supports API providers like OpenRouter, Anthropic, Glama, OpenAI, Google Gemini, AWS Bedrock, Azure, and GCP Vertex. You can also configure any OpenAI compatible API, or use a local model through LM Studio/Ollama. If you're using OpenRouter, the extension fetches their latest model list, allowing you to use the newest models as soon as they're available. The extension also keeps track of total tokens and API usage cost for the entire task loop and individual requests, keeping you informed of spend every step of the way. 
diff --git a/package-lock.json b/package-lock.json index 0417b70..dd46780 100644 --- a/package-lock.json +++ b/package-lock.json @@ -31,7 +31,7 @@ "isbinaryfile": "^5.0.2", "mammoth": "^1.8.0", "monaco-vscode-textmate-theme-converter": "^0.1.7", - "openai": "^4.61.0", + "openai": "^4.73.1", "os-name": "^6.0.0", "p-wait-for": "^5.0.2", "pdf-parse": "^1.1.1", diff --git a/package.json b/package.json index c872c43..264bbe7 100644 --- a/package.json +++ b/package.json @@ -214,7 +214,7 @@ "isbinaryfile": "^5.0.2", "mammoth": "^1.8.0", "monaco-vscode-textmate-theme-converter": "^0.1.7", - "openai": "^4.61.0", + "openai": "^4.73.1", "os-name": "^6.0.0", "p-wait-for": "^5.0.2", "pdf-parse": "^1.1.1", diff --git a/src/api/index.ts b/src/api/index.ts index 06983de..999b588 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -1,4 +1,5 @@ import { Anthropic } from "@anthropic-ai/sdk" +import { GlamaHandler } from "./providers/glama" import { ApiConfiguration, ModelInfo } from "../shared/api" import { AnthropicHandler } from "./providers/anthropic" import { AwsBedrockHandler } from "./providers/bedrock" @@ -26,6 +27,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler { switch (apiProvider) { case "anthropic": return new AnthropicHandler(options) + case "glama": + return new GlamaHandler(options) case "openrouter": return new OpenRouterHandler(options) case "bedrock": diff --git a/src/api/providers/glama.ts b/src/api/providers/glama.ts new file mode 100644 index 0000000..4be1291 --- /dev/null +++ b/src/api/providers/glama.ts @@ -0,0 +1,134 @@ +import { Anthropic } from "@anthropic-ai/sdk" +import axios from "axios" +import OpenAI from "openai" +import { ApiHandler } from "../" +import { ApiHandlerOptions, ModelInfo, glamaDefaultModelId, glamaDefaultModelInfo } from "../../shared/api" +import { convertToOpenAiMessages } from "../transform/openai-format" +import { ApiStream } from "../transform/stream" +import delay from "delay" + +export class 
GlamaHandler implements ApiHandler { + private options: ApiHandlerOptions + private client: OpenAI + + constructor(options: ApiHandlerOptions) { + this.options = options + this.client = new OpenAI({ + baseURL: "https://glama.ai/api/gateway/openai/v1", + apiKey: this.options.glamaApiKey, + }) + } + + async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { + // Convert Anthropic messages to OpenAI format + const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [ + { role: "system", content: systemPrompt }, + ...convertToOpenAiMessages(messages), + ] + + // this is specifically for claude models (some models may 'support prompt caching' automatically without this) + if (this.getModel().id.startsWith("anthropic/claude-3")) { + openAiMessages[0] = { + role: "system", + content: [ + { + type: "text", + text: systemPrompt, + // @ts-ignore-next-line + cache_control: { type: "ephemeral" }, + }, + ], + } + + // Add cache_control to the last two user messages + // (note: this works because we only ever add one user message at a time, + // but if we added multiple we'd need to mark the user message before the last assistant message) + const lastTwoUserMessages = openAiMessages.filter((msg) => msg.role === "user").slice(-2) + lastTwoUserMessages.forEach((msg) => { + if (typeof msg.content === "string") { + msg.content = [{ type: "text", text: msg.content }] + } + if (Array.isArray(msg.content)) { + // NOTE: this is fine since env details will always be added at the end. + // but if it weren't there, and the user added a image_url type message, + // it would pop a text part before it and then move it after to the end. + let lastTextPart = msg.content.filter((part) => part.type === "text").pop() + + if (!lastTextPart) { + lastTextPart = { type: "text", text: "..." 
} + msg.content.push(lastTextPart) + } + // @ts-ignore-next-line + lastTextPart["cache_control"] = { type: "ephemeral" } + } + }) + } + + // Required by Anthropic + // Other providers default to max tokens allowed. + let maxTokens: number | undefined + + if (this.getModel().id.startsWith("anthropic/")) { + maxTokens = 8_192 + } + + const { data: completion, response } = await this.client.chat.completions.create({ + model: this.getModel().id, + max_tokens: maxTokens, + temperature: 0, + messages: openAiMessages, + stream: true, + }).withResponse(); + + const completionRequestUuid = response.headers.get( + 'x-completion-request-uuid', + ); + + for await (const chunk of completion) { + const delta = chunk.choices[0]?.delta + + if (delta?.content) { + yield { + type: "text", + text: delta.content, + } + } + } + + // The usage information is only available after a few moments after the completion + await delay(1000) + + try { + const response = await axios.get(`https://glama.ai/api/gateway/v1/completion-requests/${completionRequestUuid}`, { + headers: { + Authorization: `Bearer ${this.options.glamaApiKey}`, + }, + }) + + const completionRequest = response.data; + + if (completionRequest.tokenUsage) { + yield { + type: "usage", + inputTokens: completionRequest.tokenUsage.promptTokens, + outputTokens: completionRequest.tokenUsage.completionTokens, + totalCost: completionRequest.totalCostUsd, + } + } + } catch (error) { + // ignore if fails + console.error("Error fetching Glama generation details:", error) + } + } + + getModel(): { id: string; info: ModelInfo } { + const modelId = this.options.glamaModelId + const modelInfo = this.options.glamaModelInfo + + if (modelId && modelInfo) { + return { id: modelId, info: modelInfo } + } + + return { id: glamaDefaultModelId, info: glamaDefaultModelInfo } + } +} diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 45f9d06..8c71324 100644 --- a/src/core/webview/ClineProvider.ts +++ 
b/src/core/webview/ClineProvider.ts @@ -33,6 +33,7 @@ https://github.com/KumarVariable/vscode-extension-sidebar-html/blob/master/src/c type SecretKey = | "apiKey" + | "glamaApiKey" | "openRouterApiKey" | "awsAccessKey" | "awsSecretKey" @@ -44,6 +45,8 @@ type SecretKey = type GlobalStateKey = | "apiProvider" | "apiModelId" + | "glamaModelId" + | "glamaModelInfo" | "awsRegion" | "awsUseCrossRegionInference" | "vertexProjectId" @@ -82,6 +85,7 @@ type GlobalStateKey = export const GlobalFileNames = { apiConversationHistory: "api_conversation_history.json", uiMessages: "ui_messages.json", + glamaModels: "glama_models.json", openRouterModels: "openrouter_models.json", mcpSettings: "cline_mcp_settings.json", } @@ -385,6 +389,24 @@ export class ClineProvider implements vscode.WebviewViewProvider { } } }) + this.readGlamaModels().then((glamaModels) => { + if (glamaModels) { + this.postMessageToWebview({ type: "glamaModels", glamaModels }) + } + }) + this.refreshGlamaModels().then(async (glamaModels) => { + if (glamaModels) { + // update model info in state (this needs to be done here since we don't want to update state while settings is open, and we may refresh models there) + const { apiConfiguration } = await this.getState() + if (apiConfiguration.glamaModelId) { + await this.updateGlobalState( + "glamaModelInfo", + glamaModels[apiConfiguration.glamaModelId], + ) + await this.postStateToWebview() + } + } + }) break case "newTask": // Code that should run in response to the hello message command @@ -403,6 +425,9 @@ export class ClineProvider implements vscode.WebviewViewProvider { apiProvider, apiModelId, apiKey, + glamaModelId, + glamaModelInfo, + glamaApiKey, openRouterApiKey, awsAccessKey, awsSecretKey, @@ -430,6 +455,9 @@ export class ClineProvider implements vscode.WebviewViewProvider { await this.updateGlobalState("apiProvider", apiProvider) await this.updateGlobalState("apiModelId", apiModelId) await this.storeSecret("apiKey", apiKey) + await 
this.updateGlobalState("glamaModelId", glamaModelId) + await this.updateGlobalState("glamaModelInfo", glamaModelInfo) + await this.storeSecret("glamaApiKey", glamaApiKey) await this.storeSecret("openRouterApiKey", openRouterApiKey) await this.storeSecret("awsAccessKey", awsAccessKey) await this.storeSecret("awsSecretKey", awsSecretKey) @@ -525,6 +553,9 @@ export class ClineProvider implements vscode.WebviewViewProvider { const lmStudioModels = await this.getLmStudioModels(message.text) this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels }) break + case "refreshGlamaModels": + await this.refreshGlamaModels() + break case "refreshOpenRouterModels": await this.refreshOpenRouterModels() break @@ -831,6 +862,93 @@ export class ClineProvider implements vscode.WebviewViewProvider { return cacheDir } + async readGlamaModels(): Promise | undefined> { + const glamaModelsFilePath = path.join( + await this.ensureCacheDirectoryExists(), + GlobalFileNames.glamaModels, + ) + const fileExists = await fileExistsAtPath(glamaModelsFilePath) + if (fileExists) { + const fileContents = await fs.readFile(glamaModelsFilePath, "utf8") + return JSON.parse(fileContents) + } + return undefined + } + + async refreshGlamaModels() { + const glamaModelsFilePath = path.join( + await this.ensureCacheDirectoryExists(), + GlobalFileNames.glamaModels, + ) + + let models: Record = {} + try { + const response = await axios.get("https://glama.ai/api/gateway/v1/models") + /* + { + "added": "2024-12-24T15:12:49.324Z", + "capabilities": [ + "adjustable_safety_settings", + "caching", + "code_execution", + "function_calling", + "json_mode", + "json_schema", + "system_instructions", + "tuning", + "input:audio", + "input:image", + "input:text", + "input:video", + "output:text" + ], + "id": "google-vertex/gemini-1.5-flash-002", + "maxTokensInput": 1048576, + "maxTokensOutput": 8192, + "pricePerToken": { + "cacheRead": null, + "cacheWrite": null, + "input": "0.000000075", + "output": "0.0000003" + 
} + } + */ + if (response.data) { + const rawModels = response.data; + const parsePrice = (price: any) => { + if (price) { + return parseFloat(price) * 1_000_000 + } + return undefined + } + for (const rawModel of rawModels) { + const modelInfo: ModelInfo = { + maxTokens: rawModel.maxTokensOutput, + contextWindow: rawModel.maxTokensInput, + supportsImages: rawModel.capabilities?.includes("input:image"), + supportsPromptCache: rawModel.capabilities?.includes("caching"), + inputPrice: parsePrice(rawModel.pricePerToken?.input), + outputPrice: parsePrice(rawModel.pricePerToken?.output), + description: undefined, + cacheWritesPrice: parsePrice(rawModel.pricePerToken?.cacheWrite), + cacheReadsPrice: parsePrice(rawModel.pricePerToken?.cacheRead), + } + + models[rawModel.id] = modelInfo + } + } else { + console.error("Invalid response from Glama API") + } + await fs.writeFile(glamaModelsFilePath, JSON.stringify(models)) + console.log("Glama models fetched and saved", models) + } catch (error) { + console.error("Error fetching Glama models:", error) + } + + await this.postMessageToWebview({ type: "glamaModels", glamaModels: models }) + return models + } + async readOpenRouterModels(): Promise | undefined> { const openRouterModelsFilePath = path.join( await this.ensureCacheDirectoryExists(), @@ -1153,6 +1271,9 @@ export class ClineProvider implements vscode.WebviewViewProvider { storedApiProvider, apiModelId, apiKey, + glamaApiKey, + glamaModelId, + glamaModelInfo, openRouterApiKey, awsAccessKey, awsSecretKey, @@ -1200,6 +1321,9 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.getGlobalState("apiProvider") as Promise, this.getGlobalState("apiModelId") as Promise, this.getSecret("apiKey") as Promise, + this.getSecret("glamaApiKey") as Promise, + this.getGlobalState("glamaModelId") as Promise, + this.getGlobalState("glamaModelInfo") as Promise, this.getSecret("openRouterApiKey") as Promise, this.getSecret("awsAccessKey") as Promise, 
this.getSecret("awsSecretKey") as Promise, @@ -1264,6 +1388,9 @@ export class ClineProvider implements vscode.WebviewViewProvider { apiProvider, apiModelId, apiKey, + glamaApiKey, + glamaModelId, + glamaModelInfo, openRouterApiKey, awsAccessKey, awsSecretKey, @@ -1402,6 +1529,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { } const secretKeys: SecretKey[] = [ "apiKey", + "glamaApiKey", "openRouterApiKey", "awsAccessKey", "awsSecretKey", diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index c00fa6c..887945f 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -16,6 +16,7 @@ export interface ExtensionMessage { | "workspaceUpdated" | "invoke" | "partialMessage" + | "glamaModels" | "openRouterModels" | "openAiModels" | "mcpServers" @@ -34,6 +35,7 @@ export interface ExtensionMessage { lmStudioModels?: string[] filePaths?: string[] partialMessage?: ClineMessage + glamaModels?: Record openRouterModels?: Record openAiModels?: string[] mcpServers?: McpServer[] diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index ee602ed..111faac 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -27,6 +27,7 @@ export interface WebviewMessage { | "openFile" | "openMention" | "cancelTask" + | "refreshGlamaModels" | "refreshOpenRouterModels" | "refreshOpenAiModels" | "alwaysAllowBrowser" diff --git a/src/shared/api.ts b/src/shared/api.ts index 2759a26..7675237 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -1,5 +1,6 @@ export type ApiProvider = | "anthropic" + | "glama" | "openrouter" | "bedrock" | "vertex" @@ -14,6 +15,9 @@ export interface ApiHandlerOptions { apiModelId?: string apiKey?: string // anthropic anthropicBaseUrl?: string + glamaModelId?: string + glamaModelInfo?: ModelInfo + glamaApiKey?: string openRouterApiKey?: string openRouterModelId?: string openRouterModelInfo?: ModelInfo @@ -309,6 +313,23 @@ export const bedrockModels = { }, 
} as const satisfies Record +// Glama +// https://glama.ai/models +export const glamaDefaultModelId = "anthropic/claude-3-5-sonnet" // will always exist in glamaModels +export const glamaDefaultModelInfo: ModelInfo = { + maxTokens: 8192, + contextWindow: 200_000, + supportsImages: true, + supportsComputerUse: true, + supportsPromptCache: true, + inputPrice: 3.0, + outputPrice: 15.0, + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, + description: + "The new Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: New Sonnet scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal\n\n_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's.
For requests that pass moderation, it's identical to the [Standard](/anthropic/claude-3.5-sonnet) variant._", +} + // OpenRouter // https://openrouter.ai/models?order=newest&supported_parameters=tools export const openRouterDefaultModelId = "anthropic/claude-3.5-sonnet:beta" // will always exist in openRouterModels diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index c72342e..2621a8c 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -21,6 +21,8 @@ import { deepSeekModels, geminiDefaultModelId, geminiModels, + glamaDefaultModelId, + glamaDefaultModelInfo, openAiModelInfoSaneDefaults, openAiNativeDefaultModelId, openAiNativeModels, @@ -38,6 +40,7 @@ import OpenRouterModelPicker, { OPENROUTER_MODEL_PICKER_Z_INDEX, } from "./OpenRouterModelPicker" import OpenAiModelPicker from "./OpenAiModelPicker" +import GlamaModelPicker from "./GlamaModelPicker" interface ApiOptionsProps { showModelOptions: boolean @@ -131,6 +134,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }: style={{ minWidth: 130, position: "relative", zIndex: OPENROUTER_MODEL_PICKER_Z_INDEX + 1 }}> OpenRouter Anthropic + Glama Google Gemini DeepSeek OpenAI @@ -193,6 +197,34 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }: )} + {selectedProvider === "glama" && ( +
+ + Glama API Key + + {!apiConfiguration?.glamaApiKey && ( + + You can get a Glama API key by signing up here. + + )} +

+ This key is stored locally and only used to make API requests from this extension. +

+
+ )} + {selectedProvider === "openai-native" && (
)} + {selectedProvider === "glama" && showModelOptions && } + {selectedProvider === "openrouter" && showModelOptions && } - {selectedProvider !== "openrouter" && + {selectedProvider !== "glama" && + selectedProvider !== "openrouter" && selectedProvider !== "openai" && selectedProvider !== "ollama" && selectedProvider !== "lmstudio" && @@ -872,6 +907,12 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) { return getProviderData(deepSeekModels, deepSeekDefaultModelId) case "openai-native": return getProviderData(openAiNativeModels, openAiNativeDefaultModelId) + case "glama": + return { + selectedProvider: provider, + selectedModelId: apiConfiguration?.glamaModelId || glamaDefaultModelId, + selectedModelInfo: apiConfiguration?.glamaModelInfo || glamaDefaultModelInfo, + } case "openrouter": return { selectedProvider: provider, diff --git a/webview-ui/src/components/settings/GlamaModelPicker.tsx b/webview-ui/src/components/settings/GlamaModelPicker.tsx new file mode 100644 index 0000000..e7af3c5 --- /dev/null +++ b/webview-ui/src/components/settings/GlamaModelPicker.tsx @@ -0,0 +1,396 @@ +import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" +import Fuse from "fuse.js" +import React, { KeyboardEvent, memo, useEffect, useMemo, useRef, useState } from "react" +import { useRemark } from "react-remark" +import { useMount } from "react-use" +import styled from "styled-components" +import { glamaDefaultModelId } from "../../../../src/shared/api" +import { useExtensionState } from "../../context/ExtensionStateContext" +import { vscode } from "../../utils/vscode" +import { highlight } from "../history/HistoryView" +import { ModelInfoView, normalizeApiConfiguration } from "./ApiOptions" + +const GlamaModelPicker: React.FC = () => { + const { apiConfiguration, setApiConfiguration, glamaModels } = useExtensionState() + const [searchTerm, setSearchTerm] = useState(apiConfiguration?.glamaModelId || glamaDefaultModelId) + const 
[isDropdownVisible, setIsDropdownVisible] = useState(false) + const [selectedIndex, setSelectedIndex] = useState(-1) + const dropdownRef = useRef(null) + const itemRefs = useRef<(HTMLDivElement | null)[]>([]) + const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) + const dropdownListRef = useRef(null) + + const handleModelChange = (newModelId: string) => { + // could be setting invalid model id/undefined info but validation will catch it + setApiConfiguration({ + ...apiConfiguration, + glamaModelId: newModelId, + glamaModelInfo: glamaModels[newModelId], + }) + setSearchTerm(newModelId) + } + + const { selectedModelId, selectedModelInfo } = useMemo(() => { + return normalizeApiConfiguration(apiConfiguration) + }, [apiConfiguration]) + + useMount(() => { + vscode.postMessage({ type: "refreshGlamaModels" }) + }) + + useEffect(() => { + const handleClickOutside = (event: MouseEvent) => { + if (dropdownRef.current && !dropdownRef.current.contains(event.target as Node)) { + setIsDropdownVisible(false) + } + } + + document.addEventListener("mousedown", handleClickOutside) + return () => { + document.removeEventListener("mousedown", handleClickOutside) + } + }, []) + + const modelIds = useMemo(() => { + return Object.keys(glamaModels).sort((a, b) => a.localeCompare(b)) + }, [glamaModels]) + + const searchableItems = useMemo(() => { + return modelIds.map((id) => ({ + id, + html: id, + })) + }, [modelIds]) + + const fuse = useMemo(() => { + return new Fuse(searchableItems, { + keys: ["html"], // highlight function will update this + threshold: 0.6, + shouldSort: true, + isCaseSensitive: false, + ignoreLocation: false, + includeMatches: true, + minMatchCharLength: 1, + }) + }, [searchableItems]) + + const modelSearchResults = useMemo(() => { + let results: { id: string; html: string }[] = searchTerm + ? 
highlight(fuse.search(searchTerm), "model-item-highlight") + : searchableItems + // results.sort((a, b) => a.id.localeCompare(b.id)) NOTE: sorting like this causes ids in objects to be reordered and mismatched + return results + }, [searchableItems, searchTerm, fuse]) + + const handleKeyDown = (event: KeyboardEvent) => { + if (!isDropdownVisible) return + + switch (event.key) { + case "ArrowDown": + event.preventDefault() + setSelectedIndex((prev) => (prev < modelSearchResults.length - 1 ? prev + 1 : prev)) + break + case "ArrowUp": + event.preventDefault() + setSelectedIndex((prev) => (prev > 0 ? prev - 1 : prev)) + break + case "Enter": + event.preventDefault() + if (selectedIndex >= 0 && selectedIndex < modelSearchResults.length) { + handleModelChange(modelSearchResults[selectedIndex].id) + setIsDropdownVisible(false) + } + break + case "Escape": + setIsDropdownVisible(false) + setSelectedIndex(-1) + break + } + } + + const hasInfo = useMemo(() => { + return modelIds.some((id) => id.toLowerCase() === searchTerm.toLowerCase()) + }, [modelIds, searchTerm]) + + useEffect(() => { + setSelectedIndex(-1) + if (dropdownListRef.current) { + dropdownListRef.current.scrollTop = 0 + } + }, [searchTerm]) + + useEffect(() => { + if (selectedIndex >= 0 && itemRefs.current[selectedIndex]) { + itemRefs.current[selectedIndex]?.scrollIntoView({ + block: "nearest", + behavior: "smooth", + }) + } + }, [selectedIndex]) + + return ( + <> + +
+ + + { + handleModelChange((e.target as HTMLInputElement)?.value?.toLowerCase()) + setIsDropdownVisible(true) + }} + onFocus={() => setIsDropdownVisible(true)} + onKeyDown={handleKeyDown} + style={{ width: "100%", zIndex: GLAMA_MODEL_PICKER_Z_INDEX, position: "relative" }}> + {searchTerm && ( +
{ + handleModelChange("") + setIsDropdownVisible(true) + }} + slot="end" + style={{ + display: "flex", + justifyContent: "center", + alignItems: "center", + height: "100%", + }} + /> + )} + + {isDropdownVisible && ( + + {modelSearchResults.map((item, index) => ( + (itemRefs.current[index] = el)} + isSelected={index === selectedIndex} + onMouseEnter={() => setSelectedIndex(index)} + onClick={() => { + handleModelChange(item.id) + setIsDropdownVisible(false) + }} + dangerouslySetInnerHTML={{ + __html: item.html, + }} + /> + ))} + + )} + +
+ + {hasInfo ? ( + + ) : ( +

+ The extension automatically fetches the latest list of models available on{" "} + + Glama. + + If you're unsure which model to choose, Cline works best with{" "} + handleModelChange("anthropic/claude-3-5-sonnet")}> + anthropic/claude-3-5-sonnet. + + You can also try searching "free" for no-cost options currently available.

+ )} + + ) +} + +export default GlamaModelPicker + +// Dropdown + +const DropdownWrapper = styled.div` + position: relative; + width: 100%; +` + +export const GLAMA_MODEL_PICKER_Z_INDEX = 1_001 + +const DropdownList = styled.div` + position: absolute; + top: calc(100% - 3px); + left: 0; + width: calc(100% - 2px); + max-height: 200px; + overflow-y: auto; + background-color: var(--vscode-dropdown-background); + border: 1px solid var(--vscode-list-activeSelectionBackground); + z-index: ${GLAMA_MODEL_PICKER_Z_INDEX - 1}; + border-bottom-left-radius: 3px; + border-bottom-right-radius: 3px; +` + +const DropdownItem = styled.div<{ isSelected: boolean }>` + padding: 5px 10px; + cursor: pointer; + word-break: break-all; + white-space: normal; + + background-color: ${({ isSelected }) => (isSelected ? "var(--vscode-list-activeSelectionBackground)" : "inherit")}; + + &:hover { + background-color: var(--vscode-list-activeSelectionBackground); + } +` + +// Markdown + +const StyledMarkdown = styled.div` + font-family: + var(--vscode-font-family), + system-ui, + -apple-system, + BlinkMacSystemFont, + "Segoe UI", + Roboto, + Oxygen, + Ubuntu, + Cantarell, + "Open Sans", + "Helvetica Neue", + sans-serif; + font-size: 12px; + color: var(--vscode-descriptionForeground); + + p, + li, + ol, + ul { + line-height: 1.25; + margin: 0; + } + + ol, + ul { + padding-left: 1.5em; + margin-left: 0; + } + + p { + white-space: pre-wrap; + } + + a { + text-decoration: none; + } + a { + &:hover { + text-decoration: underline; + } + } +` + +export const ModelDescriptionMarkdown = memo( + ({ + markdown, + key, + isExpanded, + setIsExpanded, + }: { + markdown?: string + key: string + isExpanded: boolean + setIsExpanded: (isExpanded: boolean) => void + }) => { + const [reactContent, setMarkdown] = useRemark() + const [showSeeMore, setShowSeeMore] = useState(false) + const textContainerRef = useRef(null) + const textRef = useRef(null) + + useEffect(() => { + setMarkdown(markdown || "") + }, [markdown, 
setMarkdown]) + + useEffect(() => { + if (textRef.current && textContainerRef.current) { + const { scrollHeight } = textRef.current + const { clientHeight } = textContainerRef.current + const isOverflowing = scrollHeight > clientHeight + setShowSeeMore(isOverflowing) + } + }, [reactContent, setIsExpanded]) + + return ( + +
+
+ {reactContent} +
+ {!isExpanded && showSeeMore && ( +
+
+ setIsExpanded(true)}> + See more + +
+ )} +
+ + ) + }, +) diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 645a260..3ac195a 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -37,6 +37,7 @@ const SettingsView = ({ onDone }: SettingsViewProps) => { browserViewportSize, setBrowserViewportSize, openRouterModels, + glamaModels, setAllowedCommands, allowedCommands, fuzzyMatchThreshold, @@ -56,7 +57,7 @@ const SettingsView = ({ onDone }: SettingsViewProps) => { const [commandInput, setCommandInput] = useState("") const handleSubmit = () => { const apiValidationResult = validateApiConfiguration(apiConfiguration) - const modelIdValidationResult = validateModelId(apiConfiguration, openRouterModels) + const modelIdValidationResult = validateModelId(apiConfiguration, glamaModels, openRouterModels) setApiErrorMessage(apiValidationResult) setModelIdErrorMessage(modelIdValidationResult) diff --git a/webview-ui/src/context/ExtensionStateContext.tsx b/webview-ui/src/context/ExtensionStateContext.tsx index 45e0614..8572b79 100644 --- a/webview-ui/src/context/ExtensionStateContext.tsx +++ b/webview-ui/src/context/ExtensionStateContext.tsx @@ -4,6 +4,8 @@ import { ExtensionMessage, ExtensionState } from "../../../src/shared/ExtensionM import { ApiConfiguration, ModelInfo, + glamaDefaultModelId, + glamaDefaultModelInfo, openRouterDefaultModelId, openRouterDefaultModelInfo, } from "../../../src/shared/api" @@ -16,6 +18,7 @@ export interface ExtensionStateContextType extends ExtensionState { didHydrateState: boolean showWelcome: boolean theme: any + glamaModels: Record openRouterModels: Record openAiModels: string[], mcpServers: McpServer[] @@ -69,6 +72,9 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode const [showWelcome, setShowWelcome] = useState(false) const [theme, setTheme] = useState(undefined) const [filePaths, setFilePaths] = useState([]) 
+ const [glamaModels, setGlamaModels] = useState>({ + [glamaDefaultModelId]: glamaDefaultModelInfo, + }) const [openRouterModels, setOpenRouterModels] = useState>({ [openRouterDefaultModelId]: openRouterDefaultModelInfo, }) @@ -85,6 +91,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode const hasKey = config ? [ config.apiKey, + config.glamaApiKey, config.openRouterApiKey, config.awsRegion, config.vertexProjectId, @@ -123,6 +130,14 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode }) break } + case "glamaModels": { + const updatedModels = message.glamaModels ?? {} + setGlamaModels({ + [glamaDefaultModelId]: glamaDefaultModelInfo, // in case the extension sent a model list without the default model + ...updatedModels, + }) + break + } case "openRouterModels": { const updatedModels = message.openRouterModels ?? {} setOpenRouterModels({ @@ -154,6 +169,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode didHydrateState, showWelcome, theme, + glamaModels, openRouterModels, openAiModels, mcpServers, diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts index 437e126..2ddc46d 100644 --- a/webview-ui/src/utils/validate.ts +++ b/webview-ui/src/utils/validate.ts @@ -1,4 +1,4 @@ -import { ApiConfiguration, openRouterDefaultModelId } from "../../../src/shared/api" +import { ApiConfiguration, glamaDefaultModelId, openRouterDefaultModelId } from "../../../src/shared/api" import { ModelInfo } from "../../../src/shared/api" export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): string | undefined { if (apiConfiguration) { @@ -8,6 +8,11 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s return "You must provide a valid API key or choose a different provider." } break + case "glama": + if (!apiConfiguration.glamaApiKey) { + return "You must provide a valid API key or choose a different provider." 
+ } + break case "bedrock": if (!apiConfiguration.awsRegion) { return "You must choose a region to use with AWS Bedrock." @@ -59,10 +64,21 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s export function validateModelId( apiConfiguration?: ApiConfiguration, + glamaModels?: Record, openRouterModels?: Record, ): string | undefined { if (apiConfiguration) { switch (apiConfiguration.apiProvider) { + case "glama": + const glamaModelId = apiConfiguration.glamaModelId || glamaDefaultModelId // in case the user hasn't changed the model id, it will be undefined by default + if (!glamaModelId) { + return "You must provide a model ID." + } + if (glamaModels && !Object.keys(glamaModels).includes(glamaModelId)) { + // even if the model list endpoint failed, extensionstatecontext will always have the default model info + return "The model ID you provided is not available. Please choose a different model." + } + break case "openrouter": const modelId = apiConfiguration.openRouterModelId || openRouterDefaultModelId // in case the user hasn't changed the model id, it will be undefined by default if (!modelId) { From eb78332d4ede55a98a55986eb303aae82eeca138 Mon Sep 17 00:00:00 2001 From: Frank Date: Sat, 4 Jan 2025 22:26:58 -0600 Subject: [PATCH 02/24] fix: dynamically set computer use value --- src/core/webview/ClineProvider.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 8c71324..a49caee 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -926,6 +926,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { maxTokens: rawModel.maxTokensOutput, contextWindow: rawModel.maxTokensInput, supportsImages: rawModel.capabilities?.includes("input:image"), + supportsComputerUse: rawModel.capabilities?.includes("computer_use"), supportsPromptCache: rawModel.capabilities?.includes("caching"), inputPrice: 
parsePrice(rawModel.pricePerToken?.input), outputPrice: parsePrice(rawModel.pricePerToken?.output), From 919fb5b91307be4702c5482f19aaa9d701a63c49 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sun, 5 Jan 2025 01:00:13 -0500 Subject: [PATCH 03/24] fix: validation logic --- webview-ui/src/components/settings/SettingsView.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 3ac195a..5ae0858 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -95,10 +95,10 @@ const SettingsView = ({ onDone }: SettingsViewProps) => { // Initial validation on mount useEffect(() => { const apiValidationResult = validateApiConfiguration(apiConfiguration) - const modelIdValidationResult = validateModelId(apiConfiguration, openRouterModels) + const modelIdValidationResult = validateModelId(apiConfiguration, glamaModels, openRouterModels) setApiErrorMessage(apiValidationResult) setModelIdErrorMessage(modelIdValidationResult) - }, [apiConfiguration, openRouterModels]) + }, [apiConfiguration, glamaModels, openRouterModels]) const handleResetState = () => { vscode.postMessage({ type: "resetState" }) From 67ba60db6e606fda8847c7ce92a98d5a178046f1 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sun, 5 Jan 2025 01:07:56 -0500 Subject: [PATCH 04/24] fix: z-index --- webview-ui/src/components/settings/GlamaModelPicker.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/GlamaModelPicker.tsx b/webview-ui/src/components/settings/GlamaModelPicker.tsx index e7af3c5..6823cc0 100644 --- a/webview-ui/src/components/settings/GlamaModelPicker.tsx +++ b/webview-ui/src/components/settings/GlamaModelPicker.tsx @@ -234,7 +234,7 @@ const DropdownWrapper = styled.div` width: 100%; ` -export const GLAMA_MODEL_PICKER_Z_INDEX = 1_001 +export const 
GLAMA_MODEL_PICKER_Z_INDEX = 1_000 const DropdownList = styled.div` position: absolute; From 8725f5ae2c43993f29641aea1ac6e32a59d3d590 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sun, 5 Jan 2025 01:08:14 -0500 Subject: [PATCH 05/24] fix: adjust order --- webview-ui/src/components/settings/ApiOptions.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 2621a8c..ebeab8d 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -134,13 +134,13 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }: style={{ minWidth: 130, position: "relative", zIndex: OPENROUTER_MODEL_PICKER_Z_INDEX + 1 }}> OpenRouter Anthropic - Glama Google Gemini DeepSeek OpenAI OpenAI Compatible GCP Vertex AI AWS Bedrock + Glama LM Studio Ollama From a966ddb2ee2771a2b719077420543ec7c7f308eb Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sun, 5 Jan 2025 01:09:11 -0500 Subject: [PATCH 06/24] Release --- .changeset/weak-mugs-battle.md | 5 +++++ README.md | 1 + 2 files changed, 6 insertions(+) create mode 100644 .changeset/weak-mugs-battle.md diff --git a/.changeset/weak-mugs-battle.md b/.changeset/weak-mugs-battle.md new file mode 100644 index 0000000..ba2878d --- /dev/null +++ b/.changeset/weak-mugs-battle.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Add the Glama provider (thanks @punkpeye!) 
diff --git a/README.md b/README.md index 7c9471f..64b0117 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ A fork of Cline, an autonomous coding agent, with some additional experimental f - Language selection for Cline's communication (English, Japanese, Spanish, French, German, and more) - Support for DeepSeek V3 - Support for Amazon Nova and Meta 3, 3.1, and 3.2 models via AWS Bedrock +- Support for Glama - Support for listing models from OpenAI-compatible providers - Per-tool MCP auto-approval - Enable/disable individual MCP servers From 6b048923c6f979a162bac195690b941b6b85bf5e Mon Sep 17 00:00:00 2001 From: Frank Date: Sun, 5 Jan 2025 14:24:51 -0600 Subject: [PATCH 07/24] fix: remove unnecessary delay --- src/api/providers/glama.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/api/providers/glama.ts b/src/api/providers/glama.ts index 4be1291..381c3bf 100644 --- a/src/api/providers/glama.ts +++ b/src/api/providers/glama.ts @@ -95,9 +95,6 @@ export class GlamaHandler implements ApiHandler { } } - // The usage information is only available after a few moments after the completion - await delay(1000) - try { const response = await axios.get(`https://glama.ai/api/gateway/v1/completion-requests/${completionRequestUuid}`, { headers: { From 8284efc64b1269e029238d8cc7700389c2133ca2 Mon Sep 17 00:00:00 2001 From: Frank Date: Sun, 5 Jan 2025 13:54:18 -0600 Subject: [PATCH 08/24] fix: use x-completion-request-id header --- src/api/providers/glama.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/api/providers/glama.ts b/src/api/providers/glama.ts index 381c3bf..4805219 100644 --- a/src/api/providers/glama.ts +++ b/src/api/providers/glama.ts @@ -80,8 +80,8 @@ export class GlamaHandler implements ApiHandler { stream: true, }).withResponse(); - const completionRequestUuid = response.headers.get( - 'x-completion-request-uuid', + const completionRequestId = response.headers.get( + 'x-completion-request-id', ); for await (const 
chunk of completion) { @@ -96,7 +96,7 @@ export class GlamaHandler implements ApiHandler { } try { - const response = await axios.get(`https://glama.ai/api/gateway/v1/completion-requests/${completionRequestUuid}`, { + const response = await axios.get(`https://glama.ai/api/gateway/v1/completion-requests/${completionRequestId}`, { headers: { Authorization: `Bearer ${this.options.glamaApiKey}`, }, From 7b94ad7a25f388c40215d84e747069cf149b34c5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 5 Jan 2025 21:47:49 +0000 Subject: [PATCH 09/24] changeset version bump --- .changeset/weak-mugs-battle.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/weak-mugs-battle.md diff --git a/.changeset/weak-mugs-battle.md b/.changeset/weak-mugs-battle.md deleted file mode 100644 index ba2878d..0000000 --- a/.changeset/weak-mugs-battle.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Add the Glama provider (thanks @punkpeye!) diff --git a/CHANGELOG.md b/CHANGELOG.md index a06b7ca..0d9ebbe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Cline Changelog +## 2.2.40 + +### Patch Changes + +- Add the Glama provider (thanks @punkpeye!) + ## [2.2.39] - Add toggle to enable/disable the MCP-related sections of the system prompt (thanks @daniel-lxs!) 
diff --git a/package-lock.json b/package-lock.json index dd46780..f205c6e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "2.2.39", + "version": "2.2.40", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "2.2.39", + "version": "2.2.40", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.26.0", diff --git a/package.json b/package.json index 264bbe7..364ee72 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Cline", "description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.", "publisher": "RooVeterinaryInc", - "version": "2.2.39", + "version": "2.2.40", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 27edd03cc928c31382a93e0cb5b7d521baa3f453 Mon Sep 17 00:00:00 2001 From: R00-B0T Date: Sun, 5 Jan 2025 21:48:28 +0000 Subject: [PATCH 10/24] Updating CHANGELOG.md format --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d9ebbe..74bbcaf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,6 @@ # Roo Cline Changelog -## 2.2.40 - -### Patch Changes +## [2.2.40] - Add the Glama provider (thanks @punkpeye!) 
From e63299802676982023134df7d0e47c2385838528 Mon Sep 17 00:00:00 2001 From: Frank Date: Sun, 5 Jan 2025 16:28:12 -0600 Subject: [PATCH 11/24] feat: add cache information --- src/api/providers/glama.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/api/providers/glama.ts b/src/api/providers/glama.ts index 4805219..8b58b15 100644 --- a/src/api/providers/glama.ts +++ b/src/api/providers/glama.ts @@ -107,6 +107,8 @@ export class GlamaHandler implements ApiHandler { if (completionRequest.tokenUsage) { yield { type: "usage", + cacheWriteTokens: completionRequest.tokenUsage.cacheCreationInputTokens, + cacheReadTokens: completionRequest.tokenUsage.cacheReadInputTokens, inputTokens: completionRequest.tokenUsage.promptTokens, outputTokens: completionRequest.tokenUsage.completionTokens, totalCost: completionRequest.totalCostUsd, From 8b0e9bbf32f9196377b9438532c8b19c01712759 Mon Sep 17 00:00:00 2001 From: Frank Date: Sun, 5 Jan 2025 16:34:44 -0600 Subject: [PATCH 12/24] fix: remove unnecessary comment --- src/shared/api.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shared/api.ts b/src/shared/api.ts index 7675237..2863893 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -315,7 +315,7 @@ export const bedrockModels = { // Glama // https://glama.ai/models -export const glamaDefaultModelId = "anthropic/claude-3-5-sonnet" // will always exist in openRouterModels +export const glamaDefaultModelId = "anthropic/claude-3-5-sonnet" export const glamaDefaultModelInfo: ModelInfo = { maxTokens: 8192, contextWindow: 200_000, From f813d65b5a5d01b20c1bea25a1577ee5079d587a Mon Sep 17 00:00:00 2001 From: Frank Date: Sun, 5 Jan 2025 16:47:49 -0600 Subject: [PATCH 13/24] fix: parse text to float --- src/api/providers/glama.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/providers/glama.ts b/src/api/providers/glama.ts index 8b58b15..2ded280 100644 --- a/src/api/providers/glama.ts +++ b/src/api/providers/glama.ts @@ -111,7 
+111,7 @@ export class GlamaHandler implements ApiHandler { cacheReadTokens: completionRequest.tokenUsage.cacheReadInputTokens, inputTokens: completionRequest.tokenUsage.promptTokens, outputTokens: completionRequest.tokenUsage.completionTokens, - totalCost: completionRequest.totalCostUsd, + totalCost: parseFloat(completionRequest.totalCostUsd), } } } catch (error) { From ac776cc8bf8781f536749a1b5191643559c80e02 Mon Sep 17 00:00:00 2001 From: Frank Date: Sun, 5 Jan 2025 17:52:44 -0600 Subject: [PATCH 14/24] fix: adjust log wording --- src/api/providers/glama.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/api/providers/glama.ts b/src/api/providers/glama.ts index 2ded280..c17db05 100644 --- a/src/api/providers/glama.ts +++ b/src/api/providers/glama.ts @@ -115,8 +115,7 @@ export class GlamaHandler implements ApiHandler { } } } catch (error) { - // ignore if fails - console.error("Error fetching Glama generation details:", error) + console.error("Error fetching Glama completion details", error) } } From 2cdfff02c089ee9a6471de82119bdeca2398726e Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sun, 5 Jan 2025 20:14:58 -0500 Subject: [PATCH 15/24] Streaming checkbox for OpenAI-compatible providers --- src/api/providers/openai.ts | 82 ++++++++++++------- src/core/webview/ClineProvider.ts | 12 +-- src/shared/api.ts | 2 +- .../src/components/settings/ApiOptions.tsx | 11 +-- 4 files changed, 62 insertions(+), 45 deletions(-) diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts index 071df8d..3ec2192 100644 --- a/src/api/providers/openai.ts +++ b/src/api/providers/openai.ts @@ -32,42 +32,64 @@ export class OpenAiHandler implements ApiHandler { } } - // Include stream_options for OpenAI Compatible providers if the checkbox is checked async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { - const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [ - { role: "system", content: 
systemPrompt }, - ...convertToOpenAiMessages(messages), - ] const modelInfo = this.getModel().info - const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = { - model: this.options.openAiModelId ?? "", - messages: openAiMessages, - temperature: 0, - stream: true, - } - if (this.options.includeMaxTokens) { - requestOptions.max_tokens = modelInfo.maxTokens - } + const modelId = this.options.openAiModelId ?? "" - if (this.options.includeStreamOptions ?? true) { - requestOptions.stream_options = { include_usage: true } - } + if (this.options.openAiStreamingEnabled ?? true) { + const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = { + role: "system", + content: systemPrompt + } + const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = { + model: modelId, + temperature: 0, + messages: [systemMessage, ...convertToOpenAiMessages(messages)], + stream: true as const, + stream_options: { include_usage: true }, + } + if (this.options.includeMaxTokens) { + requestOptions.max_tokens = modelInfo.maxTokens + } - const stream = await this.client.chat.completions.create(requestOptions) - for await (const chunk of stream) { - const delta = chunk.choices[0]?.delta - if (delta?.content) { - yield { - type: "text", - text: delta.content, + const stream = await this.client.chat.completions.create(requestOptions) + + for await (const chunk of stream) { + const delta = chunk.choices[0]?.delta + if (delta?.content) { + yield { + type: "text", + text: delta.content, + } + } + if (chunk.usage) { + yield { + type: "usage", + inputTokens: chunk.usage.prompt_tokens || 0, + outputTokens: chunk.usage.completion_tokens || 0, + } } } - if (chunk.usage) { - yield { - type: "usage", - inputTokens: chunk.usage.prompt_tokens || 0, - outputTokens: chunk.usage.completion_tokens || 0, - } + } else { + // o1 for instance doesnt support streaming, non-1 temp, or system prompt + const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = { + role: "user", 
+ content: systemPrompt + } + const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { + model: modelId, + messages: [systemMessage, ...convertToOpenAiMessages(messages)], + } + const response = await this.client.chat.completions.create(requestOptions) + + yield { + type: "text", + text: response.choices[0]?.message.content || "", + } + yield { + type: "usage", + inputTokens: response.usage?.prompt_tokens || 0, + outputTokens: response.usage?.completion_tokens || 0, } } } diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index a49caee..025cb88 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -66,7 +66,7 @@ type GlobalStateKey = | "lmStudioBaseUrl" | "anthropicBaseUrl" | "azureApiVersion" - | "includeStreamOptions" + | "openAiStreamingEnabled" | "openRouterModelId" | "openRouterModelInfo" | "openRouterUseMiddleOutTransform" @@ -447,7 +447,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { geminiApiKey, openAiNativeApiKey, azureApiVersion, - includeStreamOptions, + openAiStreamingEnabled, openRouterModelId, openRouterModelInfo, openRouterUseMiddleOutTransform, @@ -478,7 +478,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { await this.storeSecret("openAiNativeApiKey", openAiNativeApiKey) await this.storeSecret("deepSeekApiKey", message.apiConfiguration.deepSeekApiKey) await this.updateGlobalState("azureApiVersion", azureApiVersion) - await this.updateGlobalState("includeStreamOptions", includeStreamOptions) + await this.updateGlobalState("openAiStreamingEnabled", openAiStreamingEnabled) await this.updateGlobalState("openRouterModelId", openRouterModelId) await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo) await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform) @@ -1295,7 +1295,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { 
openAiNativeApiKey, deepSeekApiKey, azureApiVersion, - includeStreamOptions, + openAiStreamingEnabled, openRouterModelId, openRouterModelInfo, openRouterUseMiddleOutTransform, @@ -1345,7 +1345,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.getSecret("openAiNativeApiKey") as Promise, this.getSecret("deepSeekApiKey") as Promise, this.getGlobalState("azureApiVersion") as Promise, - this.getGlobalState("includeStreamOptions") as Promise, + this.getGlobalState("openAiStreamingEnabled") as Promise, this.getGlobalState("openRouterModelId") as Promise, this.getGlobalState("openRouterModelInfo") as Promise, this.getGlobalState("openRouterUseMiddleOutTransform") as Promise, @@ -1412,7 +1412,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { openAiNativeApiKey, deepSeekApiKey, azureApiVersion, - includeStreamOptions, + openAiStreamingEnabled, openRouterModelId, openRouterModelInfo, openRouterUseMiddleOutTransform, diff --git a/src/shared/api.ts b/src/shared/api.ts index 7675237..ccda5dd 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -41,7 +41,7 @@ export interface ApiHandlerOptions { openAiNativeApiKey?: string azureApiVersion?: string openRouterUseMiddleOutTransform?: boolean - includeStreamOptions?: boolean + openAiStreamingEnabled?: boolean setAzureApiVersion?: boolean deepSeekBaseUrl?: string deepSeekApiKey?: string diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index ebeab8d..f38a0a3 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -477,21 +477,16 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
{ const isChecked = e.target.checked setApiConfiguration({ ...apiConfiguration, - includeStreamOptions: isChecked + openAiStreamingEnabled: isChecked }) }}> - Include stream options + Enable streaming -
Date: Sun, 5 Jan 2025 20:52:06 -0500 Subject: [PATCH 16/24] Add test --- src/api/providers/__tests__/openai.test.ts | 192 +++++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 src/api/providers/__tests__/openai.test.ts diff --git a/src/api/providers/__tests__/openai.test.ts b/src/api/providers/__tests__/openai.test.ts new file mode 100644 index 0000000..0a88068 --- /dev/null +++ b/src/api/providers/__tests__/openai.test.ts @@ -0,0 +1,192 @@ +import { OpenAiHandler } from '../openai' +import { ApiHandlerOptions, openAiModelInfoSaneDefaults } from '../../../shared/api' +import OpenAI, { AzureOpenAI } from 'openai' +import { Anthropic } from '@anthropic-ai/sdk' + +// Mock dependencies +jest.mock('openai') + +describe('OpenAiHandler', () => { + const mockOptions: ApiHandlerOptions = { + openAiApiKey: 'test-key', + openAiModelId: 'gpt-4', + openAiStreamingEnabled: true, + openAiBaseUrl: 'https://api.openai.com/v1' + } + + beforeEach(() => { + jest.clearAllMocks() + }) + + test('constructor initializes with correct options', () => { + const handler = new OpenAiHandler(mockOptions) + expect(handler).toBeInstanceOf(OpenAiHandler) + expect(OpenAI).toHaveBeenCalledWith({ + apiKey: mockOptions.openAiApiKey, + baseURL: mockOptions.openAiBaseUrl + }) + }) + + test('constructor initializes Azure client when Azure URL is provided', () => { + const azureOptions: ApiHandlerOptions = { + ...mockOptions, + openAiBaseUrl: 'https://example.azure.com', + azureApiVersion: '2023-05-15' + } + const handler = new OpenAiHandler(azureOptions) + expect(handler).toBeInstanceOf(OpenAiHandler) + expect(AzureOpenAI).toHaveBeenCalledWith({ + baseURL: azureOptions.openAiBaseUrl, + apiKey: azureOptions.openAiApiKey, + apiVersion: azureOptions.azureApiVersion + }) + }) + + test('getModel returns correct model info', () => { + const handler = new OpenAiHandler(mockOptions) + const result = handler.getModel() + + expect(result).toEqual({ + id: mockOptions.openAiModelId, + info: 
openAiModelInfoSaneDefaults + }) + }) + + test('createMessage handles streaming correctly when enabled', async () => { + const handler = new OpenAiHandler({ + ...mockOptions, + openAiStreamingEnabled: true, + includeMaxTokens: true + }) + + const mockStream = { + async *[Symbol.asyncIterator]() { + yield { + choices: [{ + delta: { + content: 'test response' + } + }], + usage: { + prompt_tokens: 10, + completion_tokens: 5 + } + } + } + } + + const mockCreate = jest.fn().mockResolvedValue(mockStream) + ;(OpenAI as jest.MockedClass).prototype.chat = { + completions: { create: mockCreate } + } as any + + const systemPrompt = 'test system prompt' + const messages: Anthropic.Messages.MessageParam[] = [ + { role: 'user', content: 'test message' } + ] + + const generator = handler.createMessage(systemPrompt, messages) + const chunks = [] + + for await (const chunk of generator) { + chunks.push(chunk) + } + + expect(chunks).toEqual([ + { + type: 'text', + text: 'test response' + }, + { + type: 'usage', + inputTokens: 10, + outputTokens: 5 + } + ]) + + expect(mockCreate).toHaveBeenCalledWith({ + model: mockOptions.openAiModelId, + messages: [ + { role: 'system', content: systemPrompt }, + { role: 'user', content: 'test message' } + ], + temperature: 0, + stream: true, + stream_options: { include_usage: true }, + max_tokens: openAiModelInfoSaneDefaults.maxTokens + }) + }) + + test('createMessage handles non-streaming correctly when disabled', async () => { + const handler = new OpenAiHandler({ + ...mockOptions, + openAiStreamingEnabled: false + }) + + const mockResponse = { + choices: [{ + message: { + content: 'test response' + } + }], + usage: { + prompt_tokens: 10, + completion_tokens: 5 + } + } + + const mockCreate = jest.fn().mockResolvedValue(mockResponse) + ;(OpenAI as jest.MockedClass).prototype.chat = { + completions: { create: mockCreate } + } as any + + const systemPrompt = 'test system prompt' + const messages: Anthropic.Messages.MessageParam[] = [ + { role: 
'user', content: 'test message' } + ] + + const generator = handler.createMessage(systemPrompt, messages) + const chunks = [] + + for await (const chunk of generator) { + chunks.push(chunk) + } + + expect(chunks).toEqual([ + { + type: 'text', + text: 'test response' + }, + { + type: 'usage', + inputTokens: 10, + outputTokens: 5 + } + ]) + + expect(mockCreate).toHaveBeenCalledWith({ + model: mockOptions.openAiModelId, + messages: [ + { role: 'user', content: systemPrompt }, + { role: 'user', content: 'test message' } + ] + }) + }) + + test('createMessage handles API errors', async () => { + const handler = new OpenAiHandler(mockOptions) + const mockStream = { + async *[Symbol.asyncIterator]() { + throw new Error('API Error') + } + } + + const mockCreate = jest.fn().mockResolvedValue(mockStream) + ;(OpenAI as jest.MockedClass).prototype.chat = { + completions: { create: mockCreate } + } as any + + const generator = handler.createMessage('test', []) + await expect(generator.next()).rejects.toThrow('API Error') + }) +}) \ No newline at end of file From 26f55d97089f1fb3095a4590f5230cb4ce90fed5 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sun, 5 Jan 2025 21:04:20 -0500 Subject: [PATCH 17/24] Update README --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 64b0117..88cf173 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ A fork of Cline, an autonomous coding agent, with some additional experimental f - Support for Amazon Nova and Meta 3, 3.1, and 3.2 models via AWS Bedrock - Support for Glama - Support for listing models from OpenAI-compatible providers +- Support for adding OpenAI-compatible models with or without streaming - Per-tool MCP auto-approval - Enable/disable individual MCP servers - Enable/disable the MCP feature overall From 38df02c43cb97d559f65594f32e4ee2c184888e5 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sun, 5 Jan 2025 22:41:31 -0500 Subject: [PATCH 18/24] Add changeset --- 
.changeset/light-shoes-rescue.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/light-shoes-rescue.md diff --git a/.changeset/light-shoes-rescue.md b/.changeset/light-shoes-rescue.md new file mode 100644 index 0000000..0da95a2 --- /dev/null +++ b/.changeset/light-shoes-rescue.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Checkbox to disable streaming for OpenAI-compatible providers From 6d0b386595b241504883392fc86bf3bd16791b20 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 6 Jan 2025 03:47:42 +0000 Subject: [PATCH 19/24] changeset version bump --- .changeset/light-shoes-rescue.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/light-shoes-rescue.md diff --git a/.changeset/light-shoes-rescue.md b/.changeset/light-shoes-rescue.md deleted file mode 100644 index 0da95a2..0000000 --- a/.changeset/light-shoes-rescue.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Checkbox to disable streaming for OpenAI-compatible providers diff --git a/CHANGELOG.md b/CHANGELOG.md index 74bbcaf..a850f0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Cline Changelog +## 2.2.41 + +### Patch Changes + +- Checkbox to disable streaming for OpenAI-compatible providers + ## [2.2.40] - Add the Glama provider (thanks @punkpeye!) 
diff --git a/package-lock.json b/package-lock.json index f205c6e..368a747 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "2.2.40", + "version": "2.2.41", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "2.2.40", + "version": "2.2.41", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.26.0", diff --git a/package.json b/package.json index 364ee72..7871d4b 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Cline", "description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.", "publisher": "RooVeterinaryInc", - "version": "2.2.40", + "version": "2.2.41", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From c38da92d726e9ef5fa4fed34bf04d9aefd1e6a72 Mon Sep 17 00:00:00 2001 From: R00-B0T Date: Mon, 6 Jan 2025 03:48:20 +0000 Subject: [PATCH 20/24] Updating CHANGELOG.md format --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a850f0a..4b1b120 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,6 @@ # Roo Cline Changelog -## 2.2.41 - -### Patch Changes +## [2.2.41] - Checkbox to disable streaming for OpenAI-compatible providers From 6e834d2fc3def044432b8e99635d859251a146f8 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 6 Jan 2025 11:22:47 -0500 Subject: [PATCH 21/24] Add missing deepseek check --- webview-ui/src/context/ExtensionStateContext.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/webview-ui/src/context/ExtensionStateContext.tsx b/webview-ui/src/context/ExtensionStateContext.tsx index 8572b79..52ee1e4 100644 --- a/webview-ui/src/context/ExtensionStateContext.tsx +++ b/webview-ui/src/context/ExtensionStateContext.tsx @@ -100,6 +100,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode 
config.lmStudioModelId, config.geminiApiKey, config.openAiNativeApiKey, + config.deepSeekApiKey, ].some((key) => key !== undefined) : false setShowWelcome(!hasKey) From 7e9ea7ac2862c7689195cdeed753f8dc3cd0fdaa Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 6 Jan 2025 01:50:06 -0500 Subject: [PATCH 22/24] Add a Git section to the context mentions --- .changeset/two-camels-jam.md | 5 + README.md | 1 + src/core/Cline.ts | 21 +- src/core/mentions/__tests__/index.test.ts | 155 ++++++++++++ src/core/mentions/index.ts | 37 ++- src/core/webview/ClineProvider.ts | 19 ++ .../misc/__tests__/extract-text.test.ts | 237 +++++++++++------- src/integrations/misc/extract-text.ts | 34 +++ src/shared/ExtensionMessage.ts | 3 + src/shared/WebviewMessage.ts | 2 + src/shared/context-mentions.ts | 89 +++++-- src/utils/git.ts | 166 ++++++++++++ .../src/components/chat/ChatTextArea.tsx | 105 ++++---- .../src/components/chat/ContextMenu.tsx | 54 +++- webview-ui/src/services/GitService.ts | 46 ++++ .../utils/__tests__/context-mentions.test.ts | 130 ++++++++++ webview-ui/src/utils/context-mentions.ts | 90 ++++++- 17 files changed, 987 insertions(+), 207 deletions(-) create mode 100644 .changeset/two-camels-jam.md create mode 100644 src/core/mentions/__tests__/index.test.ts create mode 100644 src/utils/git.ts create mode 100644 webview-ui/src/services/GitService.ts create mode 100644 webview-ui/src/utils/__tests__/context-mentions.test.ts diff --git a/.changeset/two-camels-jam.md b/.changeset/two-camels-jam.md new file mode 100644 index 0000000..6f165f7 --- /dev/null +++ b/.changeset/two-camels-jam.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Add a Git section to the context mentions diff --git a/README.md b/README.md index 88cf173..bf2a627 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ A fork of Cline, an autonomous coding agent, with some additional experimental f - Drag and drop images into chats - Delete messages from chats +- @-mention Git commits to include their 
context in the chat - "Enhance prompt" button (OpenRouter models only for now) - Sound effects for feedback - Option to use browsers of different sizes and adjust screenshot quality diff --git a/src/core/Cline.ts b/src/core/Cline.ts index 6a73a5f..84bf47c 100644 --- a/src/core/Cline.ts +++ b/src/core/Cline.ts @@ -12,7 +12,7 @@ import { ApiHandler, SingleCompletionHandler, buildApiHandler } from "../api" import { ApiStream } from "../api/transform/stream" import { DiffViewProvider } from "../integrations/editor/DiffViewProvider" import { findToolName, formatContentBlockToMarkdown } from "../integrations/misc/export-markdown" -import { extractTextFromFile, addLineNumbers, stripLineNumbers, everyLineHasLineNumbers } from "../integrations/misc/extract-text" +import { extractTextFromFile, addLineNumbers, stripLineNumbers, everyLineHasLineNumbers, truncateOutput } from "../integrations/misc/extract-text" import { TerminalManager } from "../integrations/terminal/TerminalManager" import { UrlContentFetcher } from "../services/browser/UrlContentFetcher" import { listFiles } from "../services/glob/list-files" @@ -716,22 +716,6 @@ export class Cline { } }) - const getFormattedOutput = async () => { - const { terminalOutputLineLimit } = await this.providerRef.deref()?.getState() ?? {} - const limit = terminalOutputLineLimit ?? 
0 - - if (limit > 0 && lines.length > limit) { - const beforeLimit = Math.floor(limit * 0.2) // 20% of lines before - const afterLimit = limit - beforeLimit // remaining 80% after - return [ - ...lines.slice(0, beforeLimit), - `\n[...${lines.length - limit} lines omitted...]\n`, - ...lines.slice(-afterLimit) - ].join('\n') - } - return lines.join('\n') - } - let completed = false process.once("completed", () => { completed = true @@ -750,7 +734,8 @@ export class Cline { // grouping command_output messages despite any gaps anyways) await delay(50) - const output = await getFormattedOutput() + const { terminalOutputLineLimit } = await this.providerRef.deref()?.getState() ?? {} + const output = truncateOutput(lines.join('\n'), terminalOutputLineLimit) const result = output.trim() if (userFeedback) { diff --git a/src/core/mentions/__tests__/index.test.ts b/src/core/mentions/__tests__/index.test.ts new file mode 100644 index 0000000..e816726 --- /dev/null +++ b/src/core/mentions/__tests__/index.test.ts @@ -0,0 +1,155 @@ +// Create mock vscode module before importing anything +const createMockUri = (scheme: string, path: string) => ({ + scheme, + authority: '', + path, + query: '', + fragment: '', + fsPath: path, + with: jest.fn(), + toString: () => path, + toJSON: () => ({ + scheme, + authority: '', + path, + query: '', + fragment: '' + }) +}) + +const mockExecuteCommand = jest.fn() +const mockOpenExternal = jest.fn() +const mockShowErrorMessage = jest.fn() + +const mockVscode = { + workspace: { + workspaceFolders: [{ + uri: { fsPath: "/test/workspace" } + }] + }, + window: { + showErrorMessage: mockShowErrorMessage, + showInformationMessage: jest.fn(), + showWarningMessage: jest.fn(), + createTextEditorDecorationType: jest.fn(), + createOutputChannel: jest.fn(), + createWebviewPanel: jest.fn(), + activeTextEditor: undefined + }, + commands: { + executeCommand: mockExecuteCommand + }, + env: { + openExternal: mockOpenExternal + }, + Uri: { + parse: jest.fn((url: string) 
=> createMockUri('https', url)), + file: jest.fn((path: string) => createMockUri('file', path)) + }, + Position: jest.fn(), + Range: jest.fn(), + TextEdit: jest.fn(), + WorkspaceEdit: jest.fn(), + DiagnosticSeverity: { + Error: 0, + Warning: 1, + Information: 2, + Hint: 3 + } +} + +// Mock modules +jest.mock('vscode', () => mockVscode) +jest.mock("../../../services/browser/UrlContentFetcher") +jest.mock("../../../utils/git") + +// Now import the modules that use the mocks +import { parseMentions, openMention } from "../index" +import { UrlContentFetcher } from "../../../services/browser/UrlContentFetcher" +import * as git from "../../../utils/git" + +describe("mentions", () => { + const mockCwd = "/test/workspace" + let mockUrlContentFetcher: UrlContentFetcher + + beforeEach(() => { + jest.clearAllMocks() + + // Create a mock instance with just the methods we need + mockUrlContentFetcher = { + launchBrowser: jest.fn().mockResolvedValue(undefined), + closeBrowser: jest.fn().mockResolvedValue(undefined), + urlToMarkdown: jest.fn().mockResolvedValue(""), + } as unknown as UrlContentFetcher + }) + + describe("parseMentions", () => { + it("should parse git commit mentions", async () => { + const commitHash = "abc1234" + const commitInfo = `abc1234 Fix bug in parser + +Author: John Doe +Date: Mon Jan 5 23:50:06 2025 -0500 + +Detailed commit message with multiple lines +- Fixed parsing issue +- Added tests` + + jest.mocked(git.getCommitInfo).mockResolvedValue(commitInfo) + + const result = await parseMentions( + `Check out this commit @${commitHash}`, + mockCwd, + mockUrlContentFetcher + ) + + expect(result).toContain(`'${commitHash}' (see below for commit info)`) + expect(result).toContain(``) + expect(result).toContain(commitInfo) + }) + + it("should handle errors fetching git info", async () => { + const commitHash = "abc1234" + const errorMessage = "Failed to get commit info" + + jest.mocked(git.getCommitInfo).mockRejectedValue(new Error(errorMessage)) + + const 
result = await parseMentions( + `Check out this commit @${commitHash}`, + mockCwd, + mockUrlContentFetcher + ) + + expect(result).toContain(`'${commitHash}' (see below for commit info)`) + expect(result).toContain(``) + expect(result).toContain(`Error fetching commit info: ${errorMessage}`) + }) + }) + + describe("openMention", () => { + it("should handle file paths and problems", async () => { + await openMention("/path/to/file") + expect(mockExecuteCommand).not.toHaveBeenCalled() + expect(mockOpenExternal).not.toHaveBeenCalled() + expect(mockShowErrorMessage).toHaveBeenCalledWith("Could not open file!") + + await openMention("problems") + expect(mockExecuteCommand).toHaveBeenCalledWith("workbench.actions.view.problems") + }) + + it("should handle URLs", async () => { + const url = "https://example.com" + await openMention(url) + const mockUri = mockVscode.Uri.parse(url) + expect(mockOpenExternal).toHaveBeenCalled() + const calledArg = mockOpenExternal.mock.calls[0][0] + expect(calledArg).toEqual(expect.objectContaining({ + scheme: mockUri.scheme, + authority: mockUri.authority, + path: mockUri.path, + query: mockUri.query, + fragment: mockUri.fragment + })) + }) + }) +}) \ No newline at end of file diff --git a/src/core/mentions/index.ts b/src/core/mentions/index.ts index 1c9c122..cf5bdea 100644 --- a/src/core/mentions/index.ts +++ b/src/core/mentions/index.ts @@ -2,27 +2,28 @@ import * as vscode from "vscode" import * as path from "path" import { openFile } from "../../integrations/misc/open-file" import { UrlContentFetcher } from "../../services/browser/UrlContentFetcher" -import { mentionRegexGlobal } from "../../shared/context-mentions" +import { mentionRegexGlobal, formatGitSuggestion, type MentionSuggestion } from "../../shared/context-mentions" import fs from "fs/promises" import { extractTextFromFile } from "../../integrations/misc/extract-text" import { isBinaryFile } from "isbinaryfile" import { diagnosticsToProblemsString } from 
"../../integrations/diagnostics" +import { getCommitInfo, getWorkingState } from "../../utils/git" -export function openMention(mention?: string): void { +export async function openMention(mention?: string): Promise { if (!mention) { return } + const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0) + if (!cwd) { + return + } + if (mention.startsWith("/")) { const relPath = mention.slice(1) - const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0) - if (!cwd) { - return - } const absPath = path.resolve(cwd, relPath) if (mention.endsWith("/")) { vscode.commands.executeCommand("revealInExplorer", vscode.Uri.file(absPath)) - // vscode.commands.executeCommand("vscode.openFolder", , { forceNewWindow: false }) opens in new window } else { openFile(absPath) } @@ -40,12 +41,16 @@ export async function parseMentions(text: string, cwd: string, urlContentFetcher if (mention.startsWith("http")) { return `'${mention}' (see below for site content)` } else if (mention.startsWith("/")) { - const mentionPath = mention.slice(1) // Remove the leading '/' + const mentionPath = mention.slice(1) return mentionPath.endsWith("/") ? 
`'${mentionPath}' (see below for folder content)` : `'${mentionPath}' (see below for file content)` } else if (mention === "problems") { return `Workspace Problems (see below for diagnostics)` + } else if (mention === "git-changes") { + return `Working directory changes (see below for details)` + } else if (/^[a-f0-9]{7,40}$/.test(mention)) { + return `Git commit '${mention}' (see below for commit info)` } return match }) @@ -99,6 +104,20 @@ export async function parseMentions(text: string, cwd: string, urlContentFetcher } catch (error) { parsedText += `\n\n\nError fetching diagnostics: ${error.message}\n` } + } else if (mention === "git-changes") { + try { + const workingState = await getWorkingState(cwd) + parsedText += `\n\n\n${workingState}\n` + } catch (error) { + parsedText += `\n\n\nError fetching working state: ${error.message}\n` + } + } else if (/^[a-f0-9]{7,40}$/.test(mention)) { + try { + const commitInfo = await getCommitInfo(mention, cwd) + parsedText += `\n\n\n${commitInfo}\n` + } catch (error) { + parsedText += `\n\n\nError fetching commit info: ${error.message}\n` + } } } @@ -137,7 +156,6 @@ async function getFileOrFolderContent(mentionPath: string, cwd: string): Promise folderContent += `${linePrefix}${entry.name}\n` const filePath = path.join(mentionPath, entry.name) const absoluteFilePath = path.resolve(absPath, entry.name) - // const relativeFilePath = path.relative(cwd, absoluteFilePath); fileContentPromises.push( (async () => { try { @@ -154,7 +172,6 @@ async function getFileOrFolderContent(mentionPath: string, cwd: string): Promise ) } else if (entry.isDirectory()) { folderContent += `${linePrefix}${entry.name}/\n` - // not recursively getting folder contents } else { folderContent += `${linePrefix}${entry.name}\n` } diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 025cb88..0c62ba6 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -24,6 +24,7 @@ import { getNonce 
} from "./getNonce" import { getUri } from "./getUri" import { playSound, setSoundEnabled, setSoundVolume } from "../../utils/sound" import { enhancePrompt } from "../../utils/enhance-prompt" +import { getCommitInfo, searchCommits, getWorkingState } from "../../utils/git" /* https://github.com/microsoft/vscode-webview-ui-toolkit-samples/blob/main/default/weather-webview/src/providers/WeatherViewProvider.ts @@ -732,6 +733,24 @@ export class ClineProvider implements vscode.WebviewViewProvider { } } break + + + case "searchCommits": { + const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0) + if (cwd) { + try { + const commits = await searchCommits(message.query || "", cwd) + await this.postMessageToWebview({ + type: "commitSearchResults", + commits + }) + } catch (error) { + console.error("Error searching commits:", error) + vscode.window.showErrorMessage("Failed to search commits") + } + } + break + } } }, null, diff --git a/src/integrations/misc/__tests__/extract-text.test.ts b/src/integrations/misc/__tests__/extract-text.test.ts index ced548f..5b91324 100644 --- a/src/integrations/misc/__tests__/extract-text.test.ts +++ b/src/integrations/misc/__tests__/extract-text.test.ts @@ -1,109 +1,176 @@ -import { addLineNumbers, everyLineHasLineNumbers, stripLineNumbers } from '../extract-text'; +import { addLineNumbers, everyLineHasLineNumbers, stripLineNumbers, truncateOutput } from '../extract-text'; describe('addLineNumbers', () => { - it('should add line numbers starting from 1 by default', () => { - const input = 'line 1\nline 2\nline 3'; - const expected = '1 | line 1\n2 | line 2\n3 | line 3'; - expect(addLineNumbers(input)).toBe(expected); - }); + it('should add line numbers starting from 1 by default', () => { + const input = 'line 1\nline 2\nline 3'; + const expected = '1 | line 1\n2 | line 2\n3 | line 3'; + expect(addLineNumbers(input)).toBe(expected); + }); - it('should add line numbers starting from specified line number', () => 
{ - const input = 'line 1\nline 2\nline 3'; - const expected = '10 | line 1\n11 | line 2\n12 | line 3'; - expect(addLineNumbers(input, 10)).toBe(expected); - }); + it('should add line numbers starting from specified line number', () => { + const input = 'line 1\nline 2\nline 3'; + const expected = '10 | line 1\n11 | line 2\n12 | line 3'; + expect(addLineNumbers(input, 10)).toBe(expected); + }); - it('should handle empty content', () => { - expect(addLineNumbers('')).toBe('1 | '); - expect(addLineNumbers('', 5)).toBe('5 | '); - }); + it('should handle empty content', () => { + expect(addLineNumbers('')).toBe('1 | '); + expect(addLineNumbers('', 5)).toBe('5 | '); + }); - it('should handle single line content', () => { - expect(addLineNumbers('single line')).toBe('1 | single line'); - expect(addLineNumbers('single line', 42)).toBe('42 | single line'); - }); + it('should handle single line content', () => { + expect(addLineNumbers('single line')).toBe('1 | single line'); + expect(addLineNumbers('single line', 42)).toBe('42 | single line'); + }); - it('should pad line numbers based on the highest line number', () => { - const input = 'line 1\nline 2'; - // When starting from 99, highest line will be 100, so needs 3 spaces padding - const expected = ' 99 | line 1\n100 | line 2'; - expect(addLineNumbers(input, 99)).toBe(expected); - }); + it('should pad line numbers based on the highest line number', () => { + const input = 'line 1\nline 2'; + // When starting from 99, highest line will be 100, so needs 3 spaces padding + const expected = ' 99 | line 1\n100 | line 2'; + expect(addLineNumbers(input, 99)).toBe(expected); + }); }); describe('everyLineHasLineNumbers', () => { - it('should return true for content with line numbers', () => { - const input = '1 | line one\n2 | line two\n3 | line three'; - expect(everyLineHasLineNumbers(input)).toBe(true); - }); + it('should return true for content with line numbers', () => { + const input = '1 | line one\n2 | line two\n3 | line 
three'; + expect(everyLineHasLineNumbers(input)).toBe(true); + }); - it('should return true for content with padded line numbers', () => { - const input = ' 1 | line one\n 2 | line two\n 3 | line three'; - expect(everyLineHasLineNumbers(input)).toBe(true); - }); + it('should return true for content with padded line numbers', () => { + const input = ' 1 | line one\n 2 | line two\n 3 | line three'; + expect(everyLineHasLineNumbers(input)).toBe(true); + }); - it('should return false for content without line numbers', () => { - const input = 'line one\nline two\nline three'; - expect(everyLineHasLineNumbers(input)).toBe(false); - }); + it('should return false for content without line numbers', () => { + const input = 'line one\nline two\nline three'; + expect(everyLineHasLineNumbers(input)).toBe(false); + }); - it('should return false for mixed content', () => { - const input = '1 | line one\nline two\n3 | line three'; - expect(everyLineHasLineNumbers(input)).toBe(false); - }); + it('should return false for mixed content', () => { + const input = '1 | line one\nline two\n3 | line three'; + expect(everyLineHasLineNumbers(input)).toBe(false); + }); - it('should handle empty content', () => { - expect(everyLineHasLineNumbers('')).toBe(false); - }); + it('should handle empty content', () => { + expect(everyLineHasLineNumbers('')).toBe(false); + }); - it('should return false for content with pipe but no line numbers', () => { - const input = 'a | b\nc | d'; - expect(everyLineHasLineNumbers(input)).toBe(false); - }); + it('should return false for content with pipe but no line numbers', () => { + const input = 'a | b\nc | d'; + expect(everyLineHasLineNumbers(input)).toBe(false); + }); }); describe('stripLineNumbers', () => { - it('should strip line numbers from content', () => { - const input = '1 | line one\n2 | line two\n3 | line three'; - const expected = 'line one\nline two\nline three'; - expect(stripLineNumbers(input)).toBe(expected); - }); + it('should strip line 
numbers from content', () => { + const input = '1 | line one\n2 | line two\n3 | line three'; + const expected = 'line one\nline two\nline three'; + expect(stripLineNumbers(input)).toBe(expected); + }); - it('should strip padded line numbers', () => { - const input = ' 1 | line one\n 2 | line two\n 3 | line three'; - const expected = 'line one\nline two\nline three'; - expect(stripLineNumbers(input)).toBe(expected); - }); + it('should strip padded line numbers', () => { + const input = ' 1 | line one\n 2 | line two\n 3 | line three'; + const expected = 'line one\nline two\nline three'; + expect(stripLineNumbers(input)).toBe(expected); + }); - it('should handle content without line numbers', () => { - const input = 'line one\nline two\nline three'; - expect(stripLineNumbers(input)).toBe(input); - }); + it('should handle content without line numbers', () => { + const input = 'line one\nline two\nline three'; + expect(stripLineNumbers(input)).toBe(input); + }); - it('should handle empty content', () => { - expect(stripLineNumbers('')).toBe(''); - }); + it('should handle empty content', () => { + expect(stripLineNumbers('')).toBe(''); + }); - it('should preserve content with pipe but no line numbers', () => { - const input = 'a | b\nc | d'; - expect(stripLineNumbers(input)).toBe(input); - }); + it('should preserve content with pipe but no line numbers', () => { + const input = 'a | b\nc | d'; + expect(stripLineNumbers(input)).toBe(input); + }); - it('should handle windows-style line endings', () => { - const input = '1 | line one\r\n2 | line two\r\n3 | line three'; - const expected = 'line one\r\nline two\r\nline three'; - expect(stripLineNumbers(input)).toBe(expected); - }); + it('should handle windows-style line endings', () => { + const input = '1 | line one\r\n2 | line two\r\n3 | line three'; + const expected = 'line one\r\nline two\r\nline three'; + expect(stripLineNumbers(input)).toBe(expected); + }); - it('should handle content with varying line number widths', 
() => { - const input = ' 1 | line one\n 10 | line two\n100 | line three'; - const expected = 'line one\nline two\nline three'; - expect(stripLineNumbers(input)).toBe(expected); - }); + it('should handle content with varying line number widths', () => { + const input = ' 1 | line one\n 10 | line two\n100 | line three'; + const expected = 'line one\nline two\nline three'; + expect(stripLineNumbers(input)).toBe(expected); + }); +}); - it('should preserve indentation after line numbers', () => { - const input = '1 | indented line\n2 | another indented'; - const expected = ' indented line\n another indented'; - expect(stripLineNumbers(input)).toBe(expected); - }); -}); \ No newline at end of file +describe('truncateOutput', () => { + it('returns original content when no line limit provided', () => { + const content = 'line1\nline2\nline3' + expect(truncateOutput(content)).toBe(content) + }) + + it('returns original content when lines are under limit', () => { + const content = 'line1\nline2\nline3' + expect(truncateOutput(content, 5)).toBe(content) + }) + + it('truncates content with 20/80 split when over limit', () => { + // Create 25 lines of content + const lines = Array.from({ length: 25 }, (_, i) => `line${i + 1}`) + const content = lines.join('\n') + + // Set limit to 10 lines + const result = truncateOutput(content, 10) + + // Should keep: + // - First 2 lines (20% of 10) + // - Last 8 lines (80% of 10) + // - Omission indicator in between + const expectedLines = [ + 'line1', + 'line2', + '', + '[...15 lines omitted...]', + '', + 'line18', + 'line19', + 'line20', + 'line21', + 'line22', + 'line23', + 'line24', + 'line25' + ] + expect(result).toBe(expectedLines.join('\n')) + }) + + it('handles empty content', () => { + expect(truncateOutput('', 10)).toBe('') + }) + + it('handles single line content', () => { + expect(truncateOutput('single line', 10)).toBe('single line') + }) + + it('handles windows-style line endings', () => { + // Create content with windows 
line endings + const lines = Array.from({ length: 15 }, (_, i) => `line${i + 1}`) + const content = lines.join('\r\n') + + const result = truncateOutput(content, 5) + + // Should keep first line (20% of 5 = 1) and last 4 lines (80% of 5 = 4) + // Split result by either \r\n or \n to normalize line endings + const resultLines = result.split(/\r?\n/) + const expectedLines = [ + 'line1', + '', + '[...10 lines omitted...]', + '', + 'line12', + 'line13', + 'line14', + 'line15' + ] + expect(resultLines).toEqual(expectedLines) + }) +}) \ No newline at end of file diff --git a/src/integrations/misc/extract-text.ts b/src/integrations/misc/extract-text.ts index fb303ec..ee70652 100644 --- a/src/integrations/misc/extract-text.ts +++ b/src/integrations/misc/extract-text.ts @@ -87,4 +87,38 @@ export function stripLineNumbers(content: string): string { // Join back with original line endings const lineEnding = content.includes('\r\n') ? '\r\n' : '\n' return processedLines.join(lineEnding) +} + +/** + * Truncates multi-line output while preserving context from both the beginning and end. + * When truncation is needed, it keeps 20% of the lines from the start and 80% from the end, + * with a clear indicator of how many lines were omitted in between. + * + * @param content The multi-line string to truncate + * @param lineLimit Optional maximum number of lines to keep. 
If not provided or 0, returns the original content + * @returns The truncated string with an indicator of omitted lines, or the original content if no truncation needed + * + * @example + * // With 10 line limit on 25 lines of content: + * // - Keeps first 2 lines (20% of 10) + * // - Keeps last 8 lines (80% of 10) + * // - Adds "[...15 lines omitted...]" in between + */ +export function truncateOutput(content: string, lineLimit?: number): string { + if (!lineLimit) { + return content + } + + const lines = content.split('\n') + if (lines.length <= lineLimit) { + return content + } + + const beforeLimit = Math.floor(lineLimit * 0.2) // 20% of lines before + const afterLimit = lineLimit - beforeLimit // remaining 80% after + return [ + ...lines.slice(0, beforeLimit), + `\n[...${lines.length - lineLimit} lines omitted...]\n`, + ...lines.slice(-afterLimit) + ].join('\n') } \ No newline at end of file diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index 887945f..30df793 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -3,6 +3,7 @@ import { ApiConfiguration, ModelInfo } from "./api" import { HistoryItem } from "./HistoryItem" import { McpServer } from "./mcp" +import { GitCommit } from "../utils/git" // webview will hold state export interface ExtensionMessage { @@ -21,6 +22,7 @@ export interface ExtensionMessage { | "openAiModels" | "mcpServers" | "enhancedPrompt" + | "commitSearchResults" text?: string action?: | "chatButtonClicked" @@ -39,6 +41,7 @@ export interface ExtensionMessage { openRouterModels?: Record openAiModels?: string[] mcpServers?: McpServer[] + commits?: GitCommit[] } export interface ExtensionState { diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index 111faac..48eeb4a 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -51,6 +51,7 @@ export interface WebviewMessage { | "deleteMessage" | "terminalOutputLineLimit" | "mcpEnabled" + | 
"searchCommits" text?: string disabled?: boolean askResponse?: ClineAskResponse @@ -65,6 +66,7 @@ export interface WebviewMessage { alwaysAllow?: boolean dataUrls?: string[] values?: Record + query?: string } export type ClineAskResponse = "yesButtonClicked" | "noButtonClicked" | "messageResponse" diff --git a/src/shared/context-mentions.ts b/src/shared/context-mentions.ts index 3912868..ed2a657 100644 --- a/src/shared/context-mentions.ts +++ b/src/shared/context-mentions.ts @@ -7,42 +7,79 @@ Mention regex: - **Regex Breakdown**: - `/@`: - - **@**: The mention must start with the '@' symbol. + - **@**: The mention must start with the '@' symbol. - - `((?:\/|\w+:\/\/)[^\s]+?|problems\b)`: - - **Capturing Group (`(...)`)**: Captures the part of the string that matches one of the specified patterns. - - `(?:\/|\w+:\/\/)`: - - **Non-Capturing Group (`(?:...)`)**: Groups the alternatives without capturing them for back-referencing. - - `\/`: - - **Slash (`/`)**: Indicates that the mention is a file or folder path starting with a '/'. - - `|`: Logical OR. - - `\w+:\/\/`: - - **Protocol (`\w+://`)**: Matches URLs that start with a word character sequence followed by '://', such as 'http://', 'https://', 'ftp://', etc. - - `[^\s]+?`: - - **Non-Whitespace Characters (`[^\s]+`)**: Matches one or more characters that are not whitespace. - - **Non-Greedy (`+?`)**: Ensures the smallest possible match, preventing the inclusion of trailing punctuation. - - `|`: Logical OR. - - `problems\b`: - - **Exact Word ('problems')**: Matches the exact word 'problems'. - - **Word Boundary (`\b`)**: Ensures that 'problems' is matched as a whole word and not as part of another word (e.g., 'problematic'). + - `((?:\/|\w+:\/\/)[^\s]+?|problems\b|git-changes\b)`: + - **Capturing Group (`(...)`)**: Captures the part of the string that matches one of the specified patterns. 
+ - `(?:\/|\w+:\/\/)`: + - **Non-Capturing Group (`(?:...)`)**: Groups the alternatives without capturing them for back-referencing. + - `\/`: + - **Slash (`/`)**: Indicates that the mention is a file or folder path starting with a '/'. + - `|`: Logical OR. + - `\w+:\/\/`: + - **Protocol (`\w+://`)**: Matches URLs that start with a word character sequence followed by '://', such as 'http://', 'https://', 'ftp://', etc. + - `[^\s]+?`: + - **Non-Whitespace Characters (`[^\s]+`)**: Matches one or more characters that are not whitespace. + - **Non-Greedy (`+?`)**: Ensures the smallest possible match, preventing the inclusion of trailing punctuation. + - `|`: Logical OR. + - `problems\b`: + - **Exact Word ('problems')**: Matches the exact word 'problems'. + - **Word Boundary (`\b`)**: Ensures that 'problems' is matched as a whole word and not as part of another word (e.g., 'problematic'). + - `|`: Logical OR. + - `git-changes\b`: + - **Exact Word ('git-changes')**: Matches the exact word 'git-changes'. + - **Word Boundary (`\b`)**: Ensures that 'git-changes' is matched as a whole word and not as part of another word. - `(?=[.,;:!?]?(?=[\s\r\n]|$))`: - - **Positive Lookahead (`(?=...)`)**: Ensures that the match is followed by specific patterns without including them in the match. - `[.,;:!?]?`: - - **Optional Punctuation (`[.,;:!?]?`)**: Matches zero or one of the specified punctuation marks. 
+ - `(?=[\s\r\n]|$)`: + - **Nested Positive Lookahead (`(?=[\s\r\n]|$)`)**: Ensures that the punctuation (if present) is followed by a whitespace character, a line break, or the end of the string. - **Summary**: - The regex effectively matches: - - Mentions that are file or folder paths starting with '/' and containing any non-whitespace characters (including periods within the path). - - URLs that start with a protocol (like 'http://') followed by any non-whitespace characters (including query parameters). - - The exact word 'problems'. + - Mentions that are file or folder paths starting with '/' and containing any non-whitespace characters (including periods within the path). + - URLs that start with a protocol (like 'http://') followed by any non-whitespace characters (including query parameters). + - The exact word 'problems'. + - The exact word 'git-changes'. - It ensures that any trailing punctuation marks (such as ',', '.', '!', etc.) are not included in the matched mention, allowing the punctuation to follow the mention naturally in the text. - **Global Regex**: - `mentionRegexGlobal`: Creates a global version of the `mentionRegex` to find all matches within a given string. 
*/ -export const mentionRegex = /@((?:\/|\w+:\/\/)[^\s]+?|problems\b)(?=[.,;:!?]?(?=[\s\r\n]|$))/ +export const mentionRegex = /@((?:\/|\w+:\/\/)[^\s]+?|[a-f0-9]{7,40}\b|problems\b|git-changes\b)(?=[.,;:!?]?(?=[\s\r\n]|$))/ export const mentionRegexGlobal = new RegExp(mentionRegex.source, "g") + +export interface MentionSuggestion { + type: 'file' | 'folder' | 'git' | 'problems' + label: string + description?: string + value: string + icon?: string +} + +export interface GitMentionSuggestion extends MentionSuggestion { + type: 'git' + hash: string + shortHash: string + subject: string + author: string + date: string +} + +export function formatGitSuggestion(commit: { hash: string; shortHash: string; subject: string; author: string; date: string }): GitMentionSuggestion { + return { + type: 'git', + label: commit.subject, + description: `${commit.shortHash} by ${commit.author} on ${commit.date}`, + value: commit.hash, + icon: '$(git-commit)', // VSCode git commit icon + hash: commit.hash, + shortHash: commit.shortHash, + subject: commit.subject, + author: commit.author, + date: commit.date + } +} diff --git a/src/utils/git.ts b/src/utils/git.ts new file mode 100644 index 0000000..0cd957f --- /dev/null +++ b/src/utils/git.ts @@ -0,0 +1,166 @@ +import { exec } from "child_process" +import { promisify } from "util" +import { truncateOutput } from "../integrations/misc/extract-text" + +const execAsync = promisify(exec) +const GIT_OUTPUT_LINE_LIMIT = 500 + +export interface GitCommit { + hash: string + shortHash: string + subject: string + author: string + date: string +} + +async function checkGitRepo(cwd: string): Promise { + try { + await execAsync('git rev-parse --git-dir', { cwd }) + return true + } catch (error) { + return false + } +} + +async function checkGitInstalled(): Promise { + try { + await execAsync('git --version') + return true + } catch (error) { + return false + } +} + +export async function searchCommits(query: string, cwd: string): Promise { + try { 
+ const isInstalled = await checkGitInstalled() + if (!isInstalled) { + console.error("Git is not installed") + return [] + } + + const isRepo = await checkGitRepo(cwd) + if (!isRepo) { + console.error("Not a git repository") + return [] + } + + // Search commits by hash or message, limiting to 10 results + const { stdout } = await execAsync( + `git log -n 10 --format="%H%n%h%n%s%n%an%n%ad" --date=short ` + + `--grep="${query}" --regexp-ignore-case`, + { cwd } + ) + + let output = stdout + if (!output.trim() && /^[a-f0-9]+$/i.test(query)) { + // If no results from grep search and query looks like a hash, try searching by hash + const { stdout: hashStdout } = await execAsync( + `git log -n 10 --format="%H%n%h%n%s%n%an%n%ad" --date=short ` + + `--author-date-order ${query}`, + { cwd } + ).catch(() => ({ stdout: "" })) + + if (!hashStdout.trim()) { + return [] + } + + output = hashStdout + } + + const commits: GitCommit[] = [] + const lines = output.trim().split("\n").filter(line => line !== "--") + + for (let i = 0; i < lines.length; i += 5) { + commits.push({ + hash: lines[i], + shortHash: lines[i + 1], + subject: lines[i + 2], + author: lines[i + 3], + date: lines[i + 4] + }) + } + + return commits + } catch (error) { + console.error("Error searching commits:", error) + return [] + } +} + +export async function getCommitInfo(hash: string, cwd: string): Promise { + try { + const isInstalled = await checkGitInstalled() + if (!isInstalled) { + return "Git is not installed" + } + + const isRepo = await checkGitRepo(cwd) + if (!isRepo) { + return "Not a git repository" + } + + // Get commit info, stats, and diff separately + const { stdout: info } = await execAsync( + `git show --format="%H%n%h%n%s%n%an%n%ad%n%b" --no-patch ${hash}`, + { cwd } + ) + const [fullHash, shortHash, subject, author, date, body] = info.trim().split('\n') + + const { stdout: stats } = await execAsync( + `git show --stat --format="" ${hash}`, + { cwd } + ) + + const { stdout: diff } = await 
execAsync( + `git show --format="" ${hash}`, + { cwd } + ) + + const summary = [ + `Commit: ${shortHash} (${fullHash})`, + `Author: ${author}`, + `Date: ${date}`, + `\nMessage: ${subject}`, + body ? `\nDescription:\n${body}` : '', + '\nFiles Changed:', + stats.trim(), + '\nFull Changes:' + ].join('\n') + + const output = summary + '\n\n' + diff.trim() + return truncateOutput(output, GIT_OUTPUT_LINE_LIMIT) + } catch (error) { + console.error("Error getting commit info:", error) + return `Failed to get commit info: ${error instanceof Error ? error.message : String(error)}` + } +} + +export async function getWorkingState(cwd: string): Promise { + try { + const isInstalled = await checkGitInstalled() + if (!isInstalled) { + return "Git is not installed" + } + + const isRepo = await checkGitRepo(cwd) + if (!isRepo) { + return "Not a git repository" + } + + // Get status of working directory + const { stdout: status } = await execAsync('git status --short', { cwd }) + if (!status.trim()) { + return "No changes in working directory" + } + + // Get all changes (both staged and unstaged) compared to HEAD + const { stdout: diff } = await execAsync('git diff HEAD', { cwd }) + const lineLimit = GIT_OUTPUT_LINE_LIMIT + const output = `Working directory changes:\n\n${status}\n\n${diff}`.trim() + return truncateOutput(output, lineLimit) + } catch (error) { + console.error("Error getting working state:", error) + return `Failed to get working state: ${error instanceof Error ? 
error.message : String(error)}` + } +} \ No newline at end of file diff --git a/webview-ui/src/components/chat/ChatTextArea.tsx b/webview-ui/src/components/chat/ChatTextArea.tsx index 64b45cd..2078013 100644 --- a/webview-ui/src/components/chat/ChatTextArea.tsx +++ b/webview-ui/src/components/chat/ChatTextArea.tsx @@ -12,8 +12,8 @@ import { import { MAX_IMAGES_PER_MESSAGE } from "./ChatView" import ContextMenu from "./ContextMenu" import Thumbnails from "../common/Thumbnails" - import { vscode } from "../../utils/vscode" +import { WebviewMessage } from "../../../../src/shared/WebviewMessage" interface ChatTextAreaProps { inputValue: string @@ -46,6 +46,7 @@ const ChatTextArea = forwardRef( ) => { const { filePaths, apiConfiguration } = useExtensionState() const [isTextAreaFocused, setIsTextAreaFocused] = useState(false) + const [gitCommits, setGitCommits] = useState([]) // Handle enhanced prompt response useEffect(() => { @@ -54,6 +55,15 @@ const ChatTextArea = forwardRef( if (message.type === 'enhancedPrompt' && message.text) { setInputValue(message.text) setIsEnhancingPrompt(false) + } else if (message.type === 'commitSearchResults') { + const commits = message.commits.map((commit: any) => ({ + type: ContextMenuOptionType.Git, + value: commit.hash, + label: commit.subject, + description: `${commit.shortHash} by ${commit.author} on ${commit.date}`, + icon: "$(git-commit)" + })) + setGitCommits(commits) } } window.addEventListener('message', messageHandler) @@ -73,29 +83,40 @@ const ChatTextArea = forwardRef( const [justDeletedSpaceAfterMention, setJustDeletedSpaceAfterMention] = useState(false) const [intendedCursorPosition, setIntendedCursorPosition] = useState(null) const contextMenuContainerRef = useRef(null) - const [isEnhancingPrompt, setIsEnhancingPrompt] = useState(false) + + // Fetch git commits when Git is selected or when typing a hash + useEffect(() => { + if (selectedType === ContextMenuOptionType.Git || /^[a-f0-9]+$/i.test(searchQuery)) { + const 
message: WebviewMessage = { + type: "searchCommits", + query: searchQuery || "" + } as const + vscode.postMessage(message) + } + }, [selectedType, searchQuery]) const handleEnhancePrompt = useCallback(() => { - if (!textAreaDisabled) { - const trimmedInput = inputValue.trim() - if (trimmedInput) { - setIsEnhancingPrompt(true) - const message = { - type: "enhancePrompt" as const, - text: trimmedInput, - } - vscode.postMessage(message) - } else { - const promptDescription = "The 'Enhance Prompt' button helps improve your prompt by providing additional context, clarification, or rephrasing. Try typing a prompt in here and clicking the button again to see how it works." - setInputValue(promptDescription) - } - } + if (!textAreaDisabled) { + const trimmedInput = inputValue.trim() + if (trimmedInput) { + setIsEnhancingPrompt(true) + const message = { + type: "enhancePrompt" as const, + text: trimmedInput, + } + vscode.postMessage(message) + } else { + const promptDescription = "The 'Enhance Prompt' button helps improve your prompt by providing additional context, clarification, or rephrasing. Try typing a prompt in here and clicking the button again to see how it works." 
+ setInputValue(promptDescription) + } + } }, [inputValue, textAreaDisabled, setInputValue]) const queryItems = useMemo(() => { return [ { type: ContextMenuOptionType.Problems, value: "problems" }, + ...gitCommits, ...filePaths .map((file) => "/" + file) .map((path) => ({ @@ -103,7 +124,7 @@ const ChatTextArea = forwardRef( value: path, })), ] - }, [filePaths]) + }, [filePaths, gitCommits]) useEffect(() => { const handleClickOutside = (event: MouseEvent) => { @@ -130,7 +151,9 @@ const ChatTextArea = forwardRef( return } - if (type === ContextMenuOptionType.File || type === ContextMenuOptionType.Folder) { + if (type === ContextMenuOptionType.File || + type === ContextMenuOptionType.Folder || + type === ContextMenuOptionType.Git) { if (!value) { setSelectedType(type) setSearchQuery("") @@ -149,6 +172,8 @@ const ChatTextArea = forwardRef( insertValue = value || "" } else if (type === ContextMenuOptionType.Problems) { insertValue = "problems" + } else if (type === ContextMenuOptionType.Git) { + insertValue = value || "" } const { newValue, mentionIndex } = insertMention( @@ -161,7 +186,6 @@ const ChatTextArea = forwardRef( const newCursorPosition = newValue.indexOf(" ", mentionIndex + insertValue.length) + 1 setCursorPosition(newCursorPosition) setIntendedCursorPosition(newCursorPosition) - // textAreaRef.current.focus() // scroll to cursor setTimeout(() => { @@ -179,7 +203,6 @@ const ChatTextArea = forwardRef( (event: React.KeyboardEvent) => { if (showContextMenu) { if (event.key === "Escape") { - // event.preventDefault() setSelectedType(null) setSelectedMenuIndex(3) // File by default return @@ -356,19 +379,17 @@ const ChatTextArea = forwardRef( setShowContextMenu(false) // Scroll to new cursor position - // https://stackoverflow.com/questions/29899364/how-do-you-scroll-to-the-position-of-the-cursor-in-a-textarea/40951875#40951875 setTimeout(() => { if (textAreaRef.current) { textAreaRef.current.blur() textAreaRef.current.focus() } }, 0) - // NOTE: callbacks dont 
utilize return function to cleanup, but it's fine since this timeout immediately executes and will be cleaned up by the browser (no chance component unmounts before it executes) return } - const acceptedTypes = ["png", "jpeg", "webp"] // supported by anthropic and openrouter (jpg is just a file extension but the image will be recognized as jpeg) + const acceptedTypes = ["png", "jpeg", "webp"] const imageItems = Array.from(items).filter((item) => { const [type, subtype] = item.type.split("/") return type === "image" && acceptedTypes.includes(subtype) @@ -397,7 +418,6 @@ const ChatTextArea = forwardRef( }) const imageDataArray = await Promise.all(imagePromises) const dataUrls = imageDataArray.filter((dataUrl): dataUrl is string => dataUrl !== null) - //.map((dataUrl) => dataUrl.split(",")[1]) // strip the mime type prefix, sharp doesn't need it if (dataUrls.length > 0) { setSelectedImages((prevImages) => [...prevImages, ...dataUrls].slice(0, MAX_IMAGES_PER_MESSAGE)) } else { @@ -602,7 +622,6 @@ const ChatTextArea = forwardRef( boxSizing: "border-box", backgroundColor: "transparent", color: "var(--vscode-input-foreground)", - //border: "1px solid var(--vscode-input-border)", borderRadius: 2, fontFamily: "var(--vscode-font-family)", fontSize: "var(--vscode-editor-font-size)", @@ -610,18 +629,12 @@ const ChatTextArea = forwardRef( resize: "none", overflowX: "hidden", overflowY: "scroll", - // Since we have maxRows, when text is long enough it starts to overflow the bottom padding, appearing behind the thumbnails. To fix this, we use a transparent border to push the text up instead. 
(https://stackoverflow.com/questions/42631947/maintaining-a-padding-inside-of-text-area/52538410#52538410) - // borderTop: "9px solid transparent", borderLeft: 0, borderRight: 0, borderTop: 0, borderBottom: `${thumbnailsHeight + 6}px solid transparent`, borderColor: "transparent", padding: "9px 9px 25px 9px", - // borderRight: "54px solid transparent", - // borderLeft: "9px solid transparent", // NOTE: react-textarea-autosize doesn't calculate correct height when using borderLeft/borderRight so we need to use horizontal padding instead - // Instead of using boxShadow, we use a div with a border to better replicate the behavior when the textarea is focused - // boxShadow: "0px 0px 0px 1px var(--vscode-input-border)", cursor: textAreaDisabled ? "not-allowed" : undefined, flex: 1, zIndex: 1, @@ -645,21 +658,21 @@ const ChatTextArea = forwardRef( )}
- {apiConfiguration?.apiProvider === "openrouter" && ( -
- {isEnhancingPrompt && Enhancing prompt...} - !textAreaDisabled && handleEnhancePrompt()} - style={{ fontSize: 16.5 }} - /> -
- )} - !shouldDisableImages && onSelectImages()} style={{ fontSize: 16.5 }} /> - !textAreaDisabled && onSend()} style={{ fontSize: 15 }} /> + {apiConfiguration?.apiProvider === "openrouter" && ( +
+ {isEnhancingPrompt && Enhancing prompt...} + !textAreaDisabled && handleEnhancePrompt()} + style={{ fontSize: 16.5 }} + /> +
+ )} + !shouldDisableImages && onSelectImages()} style={{ fontSize: 16.5 }} /> + !textAreaDisabled && onSend()} style={{ fontSize: 15 }} />
diff --git a/webview-ui/src/components/chat/ContextMenu.tsx b/webview-ui/src/components/chat/ContextMenu.tsx index 81a3047..535cf77 100644 --- a/webview-ui/src/components/chat/ContextMenu.tsx +++ b/webview-ui/src/components/chat/ContextMenu.tsx @@ -52,6 +52,26 @@ const ContextMenu: React.FC = ({ return Paste URL to fetch contents case ContextMenuOptionType.NoResults: return No results found + case ContextMenuOptionType.Git: + if (option.value) { + return ( +
+ {option.label} + + {option.description} + +
+ ) + } else { + return Git Commits + } case ContextMenuOptionType.File: case ContextMenuOptionType.Folder: if (option.value) { @@ -87,6 +107,8 @@ const ContextMenu: React.FC = ({ return "warning" case ContextMenuOptionType.URL: return "link" + case ContextMenuOptionType.Git: + return "git-commit" case ContextMenuOptionType.NoResults: return "info" default: @@ -121,7 +143,6 @@ const ContextMenu: React.FC = ({ maxHeight: "200px", overflowY: "auto", }}> - {/* Can't use virtuoso since it requires fixed height and menu height is dynamic based on # of items */} {filteredOptions.map((option, index) => (
= ({ flex: 1, minWidth: 0, overflow: "hidden", + paddingTop: 0 }}> {renderOptionContent(option)}
- {(option.type === ContextMenuOptionType.File || option.type === ContextMenuOptionType.Folder) && - !option.value && ( - - )} + {((option.type === ContextMenuOptionType.File || + option.type === ContextMenuOptionType.Folder || + option.type === ContextMenuOptionType.Git) && + !option.value) && ( + + )} {(option.type === ContextMenuOptionType.Problems || - ((option.type === ContextMenuOptionType.File || - option.type === ContextMenuOptionType.Folder) && - option.value)) && ( + ((option.type === ContextMenuOptionType.File || + option.type === ContextMenuOptionType.Folder || + option.type === ContextMenuOptionType.Git) && + option.value)) && ( { + if (query === this.lastQuery && this.commits) { + return this.commits + } + + // Request search from extension + vscode.postMessage({ type: 'searchCommits', query }) + + // Wait for response + const response = await new Promise((resolve) => { + const handler = (event: MessageEvent) => { + const message = event.data + if (message.type === 'commitSearchResults') { + window.removeEventListener('message', handler) + resolve(message.commits) + } + } + window.addEventListener('message', handler) + }) + + this.commits = response + this.lastQuery = query + return response + } + + clearCache() { + this.commits = null + this.lastQuery = '' + } +} + +export const gitService = new GitService() \ No newline at end of file diff --git a/webview-ui/src/utils/__tests__/context-mentions.test.ts b/webview-ui/src/utils/__tests__/context-mentions.test.ts new file mode 100644 index 0000000..6f37921 --- /dev/null +++ b/webview-ui/src/utils/__tests__/context-mentions.test.ts @@ -0,0 +1,130 @@ +import { insertMention, removeMention, getContextMenuOptions, shouldShowContextMenu, ContextMenuOptionType, ContextMenuQueryItem } from '../context-mentions' + +describe('insertMention', () => { + it('should insert mention at cursor position when no @ symbol exists', () => { + const result = insertMention('Hello world', 5, 'test') + 
expect(result.newValue).toBe('Hello@test world') + expect(result.mentionIndex).toBe(5) + }) + + it('should replace text after last @ symbol', () => { + const result = insertMention('Hello @wor world', 8, 'test') + expect(result.newValue).toBe('Hello @test world') + expect(result.mentionIndex).toBe(6) + }) + + it('should handle empty text', () => { + const result = insertMention('', 0, 'test') + expect(result.newValue).toBe('@test ') + expect(result.mentionIndex).toBe(0) + }) +}) + +describe('removeMention', () => { + it('should remove mention when cursor is at end of mention', () => { + // Test with the problems keyword that matches the regex + const result = removeMention('Hello @problems ', 15) + expect(result.newText).toBe('Hello ') + expect(result.newPosition).toBe(6) + }) + + it('should not remove text when not at end of mention', () => { + const result = removeMention('Hello @test world', 8) + expect(result.newText).toBe('Hello @test world') + expect(result.newPosition).toBe(8) + }) + + it('should handle text without mentions', () => { + const result = removeMention('Hello world', 5) + expect(result.newText).toBe('Hello world') + expect(result.newPosition).toBe(5) + }) +}) + +describe('getContextMenuOptions', () => { + const mockQueryItems: ContextMenuQueryItem[] = [ + { + type: ContextMenuOptionType.File, + value: 'src/test.ts', + label: 'test.ts', + description: 'Source file' + }, + { + type: ContextMenuOptionType.Git, + value: 'abc1234', + label: 'Initial commit', + description: 'First commit', + icon: '$(git-commit)' + }, + { + type: ContextMenuOptionType.Folder, + value: 'src', + label: 'src', + description: 'Source folder' + } + ] + + it('should return all option types for empty query', () => { + const result = getContextMenuOptions('', null, []) + expect(result).toHaveLength(5) + expect(result.map(item => item.type)).toEqual([ + ContextMenuOptionType.Problems, + ContextMenuOptionType.URL, + ContextMenuOptionType.Folder, + ContextMenuOptionType.File, + 
ContextMenuOptionType.Git + ]) + }) + + it('should filter by selected type when query is empty', () => { + const result = getContextMenuOptions('', ContextMenuOptionType.File, mockQueryItems) + expect(result).toHaveLength(1) + expect(result[0].type).toBe(ContextMenuOptionType.File) + expect(result[0].value).toBe('src/test.ts') + }) + + it('should match git commands', () => { + const result = getContextMenuOptions('git', null, mockQueryItems) + expect(result[0].type).toBe(ContextMenuOptionType.Git) + expect(result[0].label).toBe('Git Commits') + }) + + it('should match git commit hashes', () => { + const result = getContextMenuOptions('abc1234', null, mockQueryItems) + expect(result[0].type).toBe(ContextMenuOptionType.Git) + expect(result[0].value).toBe('abc1234') + }) + + it('should return NoResults when no matches found', () => { + const result = getContextMenuOptions('nonexistent', null, mockQueryItems) + expect(result).toHaveLength(1) + expect(result[0].type).toBe(ContextMenuOptionType.NoResults) + }) +}) + +describe('shouldShowContextMenu', () => { + it('should return true for @ symbol', () => { + expect(shouldShowContextMenu('@', 1)).toBe(true) + }) + + it('should return true for @ followed by text', () => { + expect(shouldShowContextMenu('Hello @test', 10)).toBe(true) + }) + + it('should return false when no @ symbol exists', () => { + expect(shouldShowContextMenu('Hello world', 5)).toBe(false) + }) + + it('should return false for @ followed by whitespace', () => { + expect(shouldShowContextMenu('Hello @ world', 6)).toBe(false) + }) + + it('should return false for @ in URL', () => { + expect(shouldShowContextMenu('Hello @http://test.com', 17)).toBe(false) + }) + + it('should return false for @problems', () => { + // Position cursor at the end to test the full word + expect(shouldShowContextMenu('@problems', 9)).toBe(false) + }) +}) \ No newline at end of file diff --git a/webview-ui/src/utils/context-mentions.ts b/webview-ui/src/utils/context-mentions.ts 
index a79aefb..6d846bf 100644 --- a/webview-ui/src/utils/context-mentions.ts +++ b/webview-ui/src/utils/context-mentions.ts @@ -51,12 +51,16 @@ export enum ContextMenuOptionType { Folder = "folder", Problems = "problems", URL = "url", + Git = "git", NoResults = "noResults", } export interface ContextMenuQueryItem { type: ContextMenuOptionType value?: string + label?: string + description?: string + icon?: string } export function getContextMenuOptions( @@ -64,6 +68,14 @@ export function getContextMenuOptions( selectedType: ContextMenuOptionType | null = null, queryItems: ContextMenuQueryItem[], ): ContextMenuQueryItem[] { + const workingChanges: ContextMenuQueryItem = { + type: ContextMenuOptionType.Git, + value: "git-changes", + label: "Working changes", + description: "Current uncommitted changes", + icon: "$(git-commit)" + } + if (query === "") { if (selectedType === ContextMenuOptionType.File) { const files = queryItems @@ -79,30 +91,88 @@ export function getContextMenuOptions( return folders.length > 0 ? folders : [{ type: ContextMenuOptionType.NoResults }] } + if (selectedType === ContextMenuOptionType.Git) { + const commits = queryItems + .filter((item) => item.type === ContextMenuOptionType.Git) + return commits.length > 0 ? 
[workingChanges, ...commits] : [workingChanges] + } + return [ - { type: ContextMenuOptionType.URL }, { type: ContextMenuOptionType.Problems }, + { type: ContextMenuOptionType.URL }, { type: ContextMenuOptionType.Folder }, { type: ContextMenuOptionType.File }, + { type: ContextMenuOptionType.Git }, ] } const lowerQuery = query.toLowerCase() + const suggestions: ContextMenuQueryItem[] = [] + // Check for top-level option matches + if ("git".startsWith(lowerQuery)) { + suggestions.push({ + type: ContextMenuOptionType.Git, + label: "Git Commits", + description: "Search repository history", + icon: "$(git-commit)" + }) + } else if ("git-changes".startsWith(lowerQuery)) { + suggestions.push(workingChanges) + } + if ("problems".startsWith(lowerQuery)) { + suggestions.push({ type: ContextMenuOptionType.Problems }) + } if (query.startsWith("http")) { - return [{ type: ContextMenuOptionType.URL, value: query }] - } else { - const matchingItems = queryItems.filter((item) => item.value?.toLowerCase().includes(lowerQuery)) + suggestions.push({ type: ContextMenuOptionType.URL, value: query }) + } - if (matchingItems.length > 0) { - return matchingItems.map((item) => ({ - type: item.type, - value: item.value, - })) + // Add exact SHA matches to suggestions + if (/^[a-f0-9]{7,40}$/i.test(lowerQuery)) { + const exactMatches = queryItems.filter((item) => + item.type === ContextMenuOptionType.Git && + item.value?.toLowerCase() === lowerQuery + ) + if (exactMatches.length > 0) { + suggestions.push(...exactMatches) } else { - return [{ type: ContextMenuOptionType.NoResults }] + // If no exact match but valid SHA format, add as option + suggestions.push({ + type: ContextMenuOptionType.Git, + value: lowerQuery, + label: `Commit ${lowerQuery}`, + description: "Git commit hash", + icon: "$(git-commit)" + }) } } + + // Get matching items, separating by type + const matchingItems = queryItems.filter((item) => + item.value?.toLowerCase().includes(lowerQuery) || + 
item.label?.toLowerCase().includes(lowerQuery) || + item.description?.toLowerCase().includes(lowerQuery) + ) + + const fileMatches = matchingItems.filter(item => + item.type === ContextMenuOptionType.File || + item.type === ContextMenuOptionType.Folder + ) + const gitMatches = matchingItems.filter(item => + item.type === ContextMenuOptionType.Git + ) + const otherMatches = matchingItems.filter(item => + item.type !== ContextMenuOptionType.File && + item.type !== ContextMenuOptionType.Folder && + item.type !== ContextMenuOptionType.Git + ) + + // Combine suggestions with matching items in the desired order + if (suggestions.length > 0 || matchingItems.length > 0) { + return [...suggestions, ...fileMatches, ...gitMatches, ...otherMatches] + } + + return [{ type: ContextMenuOptionType.NoResults }] } export function shouldShowContextMenu(text: string, position: number): boolean { From ede7e8c8d75a97e01810a5e19fb78c4cb671b5c9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 6 Jan 2025 18:08:19 +0000 Subject: [PATCH 23/24] changeset version bump --- .changeset/two-camels-jam.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/two-camels-jam.md diff --git a/.changeset/two-camels-jam.md b/.changeset/two-camels-jam.md deleted file mode 100644 index 6f165f7..0000000 --- a/.changeset/two-camels-jam.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Add a Git section to the context mentions diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b1b120..565202e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Cline Changelog +## 2.2.42 + +### Patch Changes + +- Add a Git section to the context mentions + ## [2.2.41] - Checkbox to disable streaming for OpenAI-compatible providers diff --git a/package-lock.json b/package-lock.json index 368a747..d68e66a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { 
"name": "roo-cline", - "version": "2.2.41", + "version": "2.2.42", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "2.2.41", + "version": "2.2.42", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.26.0", diff --git a/package.json b/package.json index 7871d4b..62e50ab 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Cline", "description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.", "publisher": "RooVeterinaryInc", - "version": "2.2.41", + "version": "2.2.42", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 9c65eeefbf3d361df46d97f02abf82bf8bd555b9 Mon Sep 17 00:00:00 2001 From: R00-B0T Date: Mon, 6 Jan 2025 18:08:56 +0000 Subject: [PATCH 24/24] Updating CHANGELOG.md format --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 565202e..fbce017 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,6 @@ # Roo Cline Changelog -## 2.2.42 - -### Patch Changes +## [2.2.42] - Add a Git section to the context mentions