feat(vscode-lm): implement VS Code Language Models provider

Author: RaySinner
Date: 2025-01-07 01:23:22 +03:00
Parent: 5e099e2960
Commit: 9d62a7bb77
14 changed files with 2473 additions and 153 deletions

docs/vscode_lm_api_docs.md (new file, 1319 lines; diff suppressed because it is too large)


@@ -42,7 +42,10 @@
"ai",
"llama"
],
"activationEvents": [],
"activationEvents": [
"onLanguage",
"onStartupFinished"
],
"main": "./dist/extension.js",
"contributes": {
"viewsContainers": {
@@ -141,6 +144,20 @@
"git show"
],
"description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
},
"roo-cline.vsCodeLmModelSelector": {
"type": "object",
"properties": {
"vendor": {
"type": "string",
"description": "The vendor of the language model (e.g. copilot)"
},
"family": {
"type": "string",
"description": "The family of the language model (e.g. gpt-4)"
}
},
"description": "Settings for VSCode Language Model API"
}
}
}
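
A minimal sketch of how the extension might read this contributed setting at runtime; the `roo-cline.vsCodeLmModelSelector` key comes from the manifest above, and the variable names are illustrative:

```typescript
import * as vscode from 'vscode';

// Read the selector contributed in package.json; both fields are optional.
const config = vscode.workspace.getConfiguration('roo-cline');
const selector = config.get<{ vendor?: string; family?: string }>('vsCodeLmModelSelector') ?? {};
// e.g. { vendor: "copilot", family: "gpt-4" }
```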


@@ -10,6 +10,7 @@ import { LmStudioHandler } from "./providers/lmstudio"
import { GeminiHandler } from "./providers/gemini"
import { OpenAiNativeHandler } from "./providers/openai-native"
import { DeepSeekHandler } from "./providers/deepseek"
import { VsCodeLmHandler } from "./providers/vscode-lm"
import { ApiStream } from "./transform/stream"
export interface SingleCompletionHandler {
@@ -23,6 +24,7 @@ export interface ApiHandler {
export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
const { apiProvider, ...options } = configuration
switch (apiProvider) {
case "anthropic":
return new AnthropicHandler(options)
@@ -44,6 +46,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
return new OpenAiNativeHandler(options)
case "deepseek":
return new DeepSeekHandler(options)
case "vscode-lm":
return new VsCodeLmHandler(options)
default:
return new AnthropicHandler(options)
}
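
With the new case wired in, choosing the provider is purely a configuration concern. A hedged usage sketch (the selector values are placeholders, not defaults shipped by this commit):

```typescript
const handler = buildApiHandler({
    apiProvider: "vscode-lm",
    vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" },
});
```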


@@ -0,0 +1,569 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { ApiHandler, SingleCompletionHandler } from "../";
import { calculateApiCost } from "../../utils/cost";
import { ApiStream } from "../transform/stream";
import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format";
import { SELECTOR_SEPARATOR, stringifyVsCodeLmModelSelector } from "../../shared/vsCodeSelectorUtils";
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api";
/**
* Handles interaction with VS Code's Language Model API for chat-based operations.
* This handler implements the ApiHandler interface to provide VS Code LM specific functionality.
*
* @implements {ApiHandler}
*
* @remarks
* The handler manages a VS Code language model chat client and provides methods to:
* - Create and manage chat client instances
* - Stream messages using VS Code's Language Model API
* - Retrieve model information
*
* @example
* ```typescript
* const options = {
* vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" }
* };
* const handler = new VsCodeLmHandler(options);
*
* // Stream a conversation
* const systemPrompt = "You are a helpful assistant";
* const messages = [{ role: "user", content: "Hello!" }];
* for await (const chunk of handler.createMessage(systemPrompt, messages)) {
* console.log(chunk);
* }
* ```
*/
export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions;
private client: vscode.LanguageModelChat | null;
private disposable: vscode.Disposable | null;
private currentRequestCancellation: vscode.CancellationTokenSource | null;
constructor(options: ApiHandlerOptions) {
this.options = options;
this.client = null;
this.disposable = null;
this.currentRequestCancellation = null;
try {
// Listen for model changes and reset client
this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
if (event.affectsConfiguration('lm')) {
try {
this.client = null;
this.ensureCleanState();
}
catch (error) {
console.error('Error during configuration change cleanup:', error);
}
}
});
}
catch (error) {
// Ensure cleanup if constructor fails
this.dispose();
throw new Error(
`Cline <Language Model API>: Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
);
}
}
/**
* Creates a language model chat client based on the provided selector.
*
* @param selector - Selector criteria to filter language model chat instances
* @returns Promise resolving to the first matching language model chat instance
* @throws Error when no matching models are found with the given selector
*
* @example
* const selector = { vendor: "copilot", family: "gpt-4o" };
* const chatClient = await createClient(selector);
*/
async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
try {
const models = await vscode.lm.selectChatModels(selector);
// Use first available model or create a minimal model object
if (models && Array.isArray(models) && models.length > 0) {
return models[0];
}
// Create a minimal model if no models are available
return {
id: 'default-lm',
name: 'Default Language Model',
vendor: 'vscode',
family: 'lm',
version: '1.0',
maxInputTokens: 8192,
sendRequest: async (messages, options, token) => {
// Provide a minimal implementation
return {
stream: (async function* () {
yield new vscode.LanguageModelTextPart(
"Language model functionality is limited. Please check VS Code configuration."
);
})(),
text: (async function* () {
yield "Language model functionality is limited. Please check VS Code configuration.";
})()
};
},
countTokens: async () => 0
};
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
}
}
/**
* Creates and streams a message using the VS Code Language Model API.
*
* @param systemPrompt - The system prompt to initialize the conversation context
* @param messages - An array of message parameters following the Anthropic message format
*
* @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
*
* @throws {Error} When vsCodeLmModelSelector option is not provided
* @throws {Error} When the response stream encounters an error
*
* @remarks
* This method handles the initialization of the VS Code LM client if not already created,
* converts the messages to VS Code LM format, and streams the response chunks.
* Tool-call handling is currently a work in progress.
*/
dispose(): void {
if (this.disposable) {
this.disposable.dispose();
}
if (this.currentRequestCancellation) {
this.currentRequestCancellation.cancel();
this.currentRequestCancellation.dispose();
}
}
private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
// Check for required dependencies
if (!this.client) {
console.warn('Cline <Language Model API>: No client available for token counting');
return 0;
}
if (!this.currentRequestCancellation) {
console.warn('Cline <Language Model API>: No cancellation token available for token counting');
return 0;
}
// Validate input
if (!text) {
console.debug('Cline <Language Model API>: Empty text provided for token counting');
return 0;
}
try {
// Handle different input types
let tokenCount: number;
if (typeof text === 'string') {
tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
} else if (text instanceof vscode.LanguageModelChatMessage) {
// For chat messages, ensure we have content
if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
console.debug('Cline <Language Model API>: Empty chat message content');
return 0;
}
tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
} else {
console.warn('Cline <Language Model API>: Invalid input type for token counting');
return 0;
}
// Validate the result
if (typeof tokenCount !== 'number') {
console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
return 0;
}
if (tokenCount < 0) {
console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
return 0;
}
return tokenCount;
}
catch (error) {
// Handle specific error types
if (error instanceof vscode.CancellationError) {
console.debug('Cline <Language Model API>: Token counting cancelled by user');
return 0;
}
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);
// Log additional error details if available
if (error instanceof Error && error.stack) {
console.debug('Token counting error stack:', error.stack);
}
return 0; // Fallback to prevent stream interruption
}
}
private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
const systemTokens: number = await this.countTokens(systemPrompt);
const messageTokens: number[] = await Promise.all(
vsCodeLmMessages.map(msg => this.countTokens(msg))
);
return systemTokens + messageTokens.reduce(
(sum: number, tokens: number): number => sum + tokens, 0
);
}
private ensureCleanState(): void {
if (this.currentRequestCancellation) {
this.currentRequestCancellation.cancel();
this.currentRequestCancellation.dispose();
this.currentRequestCancellation = null;
}
}
private async getClient(): Promise<vscode.LanguageModelChat> {
if (!this.client) {
console.debug('Cline <Language Model API>: Getting client with options:', {
vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
hasOptions: !!this.options,
selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
});
try {
// Use default empty selector if none provided to get all available models
const selector = this.options?.vsCodeLmModelSelector || {};
console.debug('Cline <Language Model API>: Creating client with selector:', selector);
this.client = await this.createClient(selector);
} catch (error) {
const message = error instanceof Error ? error.message : 'Unknown error';
console.error('Cline <Language Model API>: Client creation failed:', message);
throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
}
}
return this.client;
}
private cleanTerminalOutput(text: string): string {
if (!text) {
return '';
}
return text
// Normalize line endings
.replace(/\r\n/g, '\n')
.replace(/\r/g, '\n')
// Remove ANSI escape sequences
.replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
.replace(/\x9B[0-?]*[ -/]*[@-~]/g, '') // CSI sequences
// Remove terminal title-setting sequences and other OSC sequences
.replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')
// Remove control characters
.replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')
// Remove VS Code escape sequences
.replace(/\x1B[PD].*?\x1B\\/g, '') // DCS sequences
.replace(/\x1B_.*?\x1B\\/g, '') // APC sequences
.replace(/\x1B\^.*?\x1B\\/g, '') // PM sequences
.replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen
// Remove Windows paths and shell metadata
.replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
.replace(/^;?Cwd=.*$/mg, '')
// Strip escaped hex/unicode sequences
.replace(/\\x[0-9a-fA-F]{2}/g, '')
.replace(/\\u[0-9a-fA-F]{4}/g, '')
// Final cleanup
.replace(/\n{3,}/g, '\n\n') // Collapse multiple blank lines
.trim();
}
private cleanMessageContent(content: any): any {
if (!content) {
return content;
}
if (typeof content === 'string') {
return this.cleanTerminalOutput(content);
}
if (Array.isArray(content)) {
return content.map(item => this.cleanMessageContent(item));
}
if (typeof content === 'object') {
const cleaned: any = {};
for (const [key, value] of Object.entries(content)) {
cleaned[key] = this.cleanMessageContent(value);
}
return cleaned;
}
return content;
}
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
// Ensure clean state before starting a new request
this.ensureCleanState();
const client: vscode.LanguageModelChat = await this.getClient();
// Clean system prompt and messages
const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
const cleanedMessages = messages.map(msg => ({
...msg,
content: this.cleanMessageContent(msg.content)
}));
// Convert Anthropic messages to VS Code LM messages
const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
...convertToVsCodeLmMessages(cleanedMessages),
];
// Initialize cancellation token for the request
this.currentRequestCancellation = new vscode.CancellationTokenSource();
// Calculate input tokens before starting the stream
const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);
// Accumulate the text and count at the end of the stream to reduce token counting overhead.
let accumulatedText: string = '';
try {
// Create the response stream with minimal required options
const requestOptions: vscode.LanguageModelChatRequestOptions = {
justification: `Cline would like to use '${client.name}' from '${client.vendor}'. Click 'Allow' to proceed.`
};
// Note: Tool support is currently provided by the VSCode Language Model API directly
// Extensions can register tools using vscode.lm.registerTool()
const response: vscode.LanguageModelChatResponse = await client.sendRequest(
vsCodeLmMessages,
requestOptions,
this.currentRequestCancellation.token
);
// Consume the stream and handle both text and tool call chunks
for await (const chunk of response.stream) {
if (chunk instanceof vscode.LanguageModelTextPart) {
// Validate text part value
if (typeof chunk.value !== 'string') {
console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
continue;
}
accumulatedText += chunk.value;
yield {
type: "text",
text: chunk.value,
};
} else if (chunk instanceof vscode.LanguageModelToolCallPart) {
try {
// Validate tool call parameters
if (!chunk.name || typeof chunk.name !== 'string') {
console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
continue;
}
if (!chunk.callId || typeof chunk.callId !== 'string') {
console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
continue;
}
// Ensure input is a valid object
if (!chunk.input || typeof chunk.input !== 'object') {
console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
continue;
}
// Convert tool calls to text format with proper error handling
const toolCall = {
type: "tool_call",
name: chunk.name,
arguments: chunk.input,
callId: chunk.callId
};
const toolCallText = JSON.stringify(toolCall);
accumulatedText += toolCallText;
// Log tool call for debugging
console.debug('Cline <Language Model API>: Processing tool call:', {
name: chunk.name,
callId: chunk.callId,
inputSize: JSON.stringify(chunk.input).length
});
yield {
type: "text",
text: toolCallText,
};
} catch (error) {
console.error('Cline <Language Model API>: Failed to process tool call:', error);
// Continue processing other chunks even if one fails
continue;
}
} else {
console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
}
}
// Count tokens in the accumulated text after stream completion
const totalOutputTokens: number = await this.countTokens(accumulatedText);
// Report final usage after stream completion
yield {
type: "usage",
inputTokens: totalInputTokens,
outputTokens: totalOutputTokens,
totalCost: calculateApiCost(
this.getModel().info,
totalInputTokens,
totalOutputTokens
)
};
}
catch (error: unknown) {
this.ensureCleanState();
if (error instanceof vscode.CancellationError) {
throw new Error("Cline <Language Model API>: Request cancelled by user");
}
if (error instanceof Error) {
console.error('Cline <Language Model API>: Stream error details:', {
message: error.message,
stack: error.stack,
name: error.name
});
// Return original error if it's already an Error instance
throw error;
} else if (typeof error === 'object' && error !== null) {
// Handle error-like objects
const errorDetails = JSON.stringify(error, null, 2);
console.error('Cline <Language Model API>: Stream error object:', errorDetails);
throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
} else {
// Fallback for unknown error types
const errorMessage = String(error);
console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
}
}
}
// Return model information based on the current client state
getModel(): { id: string; info: ModelInfo; } {
if (this.client) {
// Validate client properties
const requiredProps = {
id: this.client.id,
vendor: this.client.vendor,
family: this.client.family,
version: this.client.version,
maxInputTokens: this.client.maxInputTokens
};
// Log any missing properties for debugging
for (const [prop, value] of Object.entries(requiredProps)) {
if (!value && value !== 0) {
console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
}
}
// Construct model ID using available information
const modelParts = [
this.client.vendor,
this.client.family,
this.client.version
].filter(Boolean);
const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);
// Build model info with conservative defaults for missing values
const modelInfo: ModelInfo = {
maxTokens: -1, // Unlimited tokens by default
contextWindow: typeof this.client.maxInputTokens === 'number'
? Math.max(0, this.client.maxInputTokens)
: openAiModelInfoSaneDefaults.contextWindow,
supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
supportsPromptCache: true,
inputPrice: 0,
outputPrice: 0,
description: `VSCode Language Model: ${modelId}`
};
return { id: modelId, info: modelInfo };
}
// Fallback when no client is available
const fallbackId = this.options.vsCodeLmModelSelector
? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
: "vscode-lm";
console.debug('Cline <Language Model API>: No client available, using fallback model info');
return {
id: fallbackId,
info: {
...openAiModelInfoSaneDefaults,
description: `VSCode Language Model (Fallback): ${fallbackId}`
}
};
}
async completePrompt(prompt: string): Promise<string> {
try {
const client = await this.getClient();
const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
let result = "";
for await (const chunk of response.stream) {
if (chunk instanceof vscode.LanguageModelTextPart) {
result += chunk.value;
}
}
return result;
} catch (error) {
if (error instanceof Error) {
throw new Error(`VSCode LM completion error: ${error.message}`)
}
throw error
}
}
}
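
For reference, a minimal sketch of the raw VS Code API flow that `VsCodeLmHandler` wraps, closely mirroring `completePrompt` above; the selector values are examples, and real callers should reuse a long-lived `CancellationTokenSource` as the handler does:

```typescript
import * as vscode from 'vscode';

async function askOnce(prompt: string): Promise<string> {
    // Pick the first model matching the selector (example values).
    const [model] = await vscode.lm.selectChatModels({ vendor: 'copilot', family: 'gpt-4o' });
    if (!model) {
        throw new Error('No matching language model is available');
    }
    const response = await model.sendRequest(
        [vscode.LanguageModelChatMessage.User(prompt)],
        {},
        new vscode.CancellationTokenSource().token,
    );
    // Accumulate only the text parts of the streamed response.
    let result = '';
    for await (const part of response.stream) {
        if (part instanceof vscode.LanguageModelTextPart) {
            result += part.value;
        }
    }
    return result;
}
```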


@@ -0,0 +1,209 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
/**
* Safely converts a value into a plain object.
*/
function asObjectSafe(value: any): object {
// Handle null/undefined
if (!value) {
return {};
}
try {
// Handle strings that might be JSON
if (typeof value === 'string') {
return JSON.parse(value);
}
// Handle pre-existing objects
if (typeof value === 'object') {
return Object.assign({}, value);
}
return {};
}
catch (error) {
console.warn('Cline <Language Model API>: Failed to parse object:', error);
return {};
}
}
export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];
for (const anthropicMessage of anthropicMessages) {
// Handle simple string messages
if (typeof anthropicMessage.content === "string") {
vsCodeLmMessages.push(
anthropicMessage.role === "assistant"
? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
: vscode.LanguageModelChatMessage.User(anthropicMessage.content)
);
continue;
}
// Handle complex message structures
switch (anthropicMessage.role) {
case "user": {
const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
toolMessages: Anthropic.ToolResultBlockParam[];
}>(
(acc, part) => {
if (part.type === "tool_result") {
acc.toolMessages.push(part);
}
else if (part.type === "text" || part.type === "image") {
acc.nonToolMessages.push(part);
}
return acc;
},
{ nonToolMessages: [], toolMessages: [] },
);
// Process tool messages first then non-tool messages
const contentParts = [
// Convert tool messages to ToolResultParts
...toolMessages.map((toolMessage) => {
// Process tool result content into TextParts
const toolContentParts: vscode.LanguageModelTextPart[] = (
typeof toolMessage.content === "string"
? [new vscode.LanguageModelTextPart(toolMessage.content)]
: (
toolMessage.content?.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart(
`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
);
}
return new vscode.LanguageModelTextPart(part.text);
})
?? [new vscode.LanguageModelTextPart("")]
)
);
return new vscode.LanguageModelToolResultPart(
toolMessage.tool_use_id,
toolContentParts
);
}),
// Convert non-tool messages to TextParts after tool messages
...nonToolMessages.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart(
`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
);
}
return new vscode.LanguageModelTextPart(part.text);
})
];
// Add single user message with all content parts
vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
break;
}
case "assistant": {
const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
toolMessages: Anthropic.ToolUseBlockParam[];
}>(
(acc, part) => {
if (part.type === "tool_use") {
acc.toolMessages.push(part);
}
else if (part.type === "text" || part.type === "image") {
acc.nonToolMessages.push(part);
}
return acc;
},
{ nonToolMessages: [], toolMessages: [] },
);
// Process tool messages first then non-tool messages
const contentParts = [
// Convert tool messages to ToolCallParts first
...toolMessages.map((toolMessage) =>
new vscode.LanguageModelToolCallPart(
toolMessage.id,
toolMessage.name,
asObjectSafe(toolMessage.input)
)
),
// Convert non-tool messages to TextParts after tool messages
...nonToolMessages.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
}
return new vscode.LanguageModelTextPart(part.text);
})
];
// Add the assistant message to the list of messages
vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
break;
}
}
}
return vsCodeLmMessages;
}
export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
switch (vsCodeLmMessageRole) {
case vscode.LanguageModelChatMessageRole.Assistant:
return "assistant";
case vscode.LanguageModelChatMessageRole.User:
return "user";
default:
return null;
}
}
export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
if (anthropicRole !== "assistant") {
throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
}
return {
id: crypto.randomUUID(),
type: "message",
model: "vscode-lm",
role: anthropicRole,
content: (
vsCodeLmMessage.content
.map((part): Anthropic.ContentBlock | null => {
if (part instanceof vscode.LanguageModelTextPart) {
return {
type: "text",
text: part.value
};
}
if (part instanceof vscode.LanguageModelToolCallPart) {
return {
type: "tool_use",
id: part.callId || crypto.randomUUID(),
name: part.name,
input: asObjectSafe(part.input)
};
}
return null;
})
.filter(
(part): part is Anthropic.ContentBlock => part !== null
)
),
stop_reason: null,
stop_sequence: null,
usage: {
input_tokens: 0,
output_tokens: 0,
}
};
}
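
To make the user-turn conversion concrete, a hedged example of the path through `convertToVsCodeLmMessages`; the message content is hypothetical:

```typescript
// Hypothetical input: one user turn mixing a tool result with plain text.
const input: Anthropic.Messages.MessageParam[] = [
    {
        role: "user",
        content: [
            { type: "tool_result", tool_use_id: "call_1", content: "42" },
            { type: "text", text: "Use that result to continue." },
        ],
    },
];
const converted = convertToVsCodeLmMessages(input);
// -> a single LanguageModelChatMessage.User whose content is a
//    LanguageModelToolResultPart("call_1", ...) followed by a
//    LanguageModelTextPart("Use that result to continue.")
```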


@@ -41,6 +41,7 @@ type SecretKey =
| "geminiApiKey"
| "openAiNativeApiKey"
| "deepSeekApiKey"
type GlobalStateKey =
| "apiProvider"
| "apiModelId"
@@ -79,6 +80,8 @@ type GlobalStateKey =
| "writeDelayMs"
| "terminalOutputLineLimit"
| "mcpEnabled"
| "vsCodeLmModelSelector"
export const GlobalFileNames = {
apiConversationHistory: "api_conversation_history.json",
uiMessages: "ui_messages.json",
@@ -426,6 +429,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
} = message.apiConfiguration
await this.updateGlobalState("apiProvider", apiProvider)
await this.updateGlobalState("apiModelId", apiModelId)
@@ -454,6 +458,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("openRouterModelId", openRouterModelId)
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
if (this.cline) {
this.cline.api = buildApiHandler(message.apiConfiguration)
}
@@ -525,6 +530,10 @@ export class ClineProvider implements vscode.WebviewViewProvider {
const lmStudioModels = await this.getLmStudioModels(message.text)
this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
break
case "requestVsCodeLmModels":
const vsCodeLmModels = await this.getVsCodeLmModels()
this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
break
case "refreshOpenRouterModels":
await this.refreshOpenRouterModels()
break
@@ -773,6 +782,17 @@ export class ClineProvider implements vscode.WebviewViewProvider {
}
}
// VSCode LM API
private async getVsCodeLmModels() {
try {
const models = await vscode.lm.selectChatModels({});
return models || [];
} catch (error) {
console.error('Error fetching VS Code LM models:', error);
return [];
}
}
// OpenAi
async getOpenAiModels(baseUrl?: string, apiKey?: string) {
@@ -1064,6 +1084,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
mcpEnabled,
} = await this.getState()
const allowedCommands = vscode.workspace
.getConfiguration('roo-cline')
.get<string[]>('allowedCommands') || []
@@ -1196,6 +1217,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
screenshotQuality,
terminalOutputLineLimit,
mcpEnabled,
vsCodeLmModelSelector,
] = await Promise.all([
this.getGlobalState("apiProvider") as Promise<ApiProvider | undefined>,
this.getGlobalState("apiModelId") as Promise<string | undefined>,
@@ -1243,6 +1265,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
this.getGlobalState("screenshotQuality") as Promise<number | undefined>,
this.getGlobalState("terminalOutputLineLimit") as Promise<number | undefined>,
this.getGlobalState("mcpEnabled") as Promise<boolean | undefined>,
this.getGlobalState("vsCodeLmModelSelector") as Promise<vscode.LanguageModelChatSelector | undefined>,
])
let apiProvider: ApiProvider
@@ -1288,6 +1311,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
},
lastShownAnnouncementId,
customInstructions,


@@ -36,7 +36,7 @@ export function activate(context: vscode.ExtensionContext) {
context.globalState.update('allowedCommands', defaultCommands);
}
const sidebarProvider = new ClineProvider(context, outputChannel)
const sidebarProvider = new ClineProvider(context, outputChannel);
context.subscriptions.push(
vscode.window.registerWebviewViewProvider(ClineProvider.sideBarId, sidebarProvider, {


@@ -12,6 +12,9 @@ export interface ExtensionMessage {
| "selectedImages"
| "ollamaModels"
| "lmStudioModels"
| "vsCodeLmModels"
| "vsCodeLmApiAvailable"
| "requestVsCodeLmModels"
| "theme"
| "workspaceUpdated"
| "invoke"
@@ -32,6 +35,7 @@ export interface ExtensionMessage {
images?: string[]
ollamaModels?: string[]
lmStudioModels?: string[]
vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
filePaths?: string[]
partialMessage?: ClineMessage
openRouterModels?: Record<string, ModelInfo>


@@ -23,6 +23,7 @@ export interface WebviewMessage {
| "resetState"
| "requestOllamaModels"
| "requestLmStudioModels"
| "requestVsCodeLmModels"
| "openImage"
| "openFile"
| "openMention"


@@ -1,3 +1,5 @@
import * as vscode from 'vscode';
export type ApiProvider =
| "anthropic"
| "openrouter"
@@ -9,11 +11,13 @@ export type ApiProvider =
| "gemini"
| "openai-native"
| "deepseek"
| "vscode-lm"
export interface ApiHandlerOptions {
apiModelId?: string
apiKey?: string // anthropic
anthropicBaseUrl?: string
vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
openRouterApiKey?: string
openRouterModelId?: string
openRouterModelInfo?: ModelInfo
@@ -47,16 +51,17 @@ export interface ApiHandlerOptions {
export type ApiConfiguration = ApiHandlerOptions & {
apiProvider?: ApiProvider
vsCodeLmModelSelector?: vscode.LanguageModelChatSelector;
}
// Models
export interface ModelInfo {
maxTokens?: number
contextWindow?: number
contextWindow: number
supportsImages?: boolean
supportsComputerUse?: boolean
supportsPromptCache: boolean // this value is hardcoded for now
supportsPromptCache: boolean
inputPrice?: number
outputPrice?: number
cacheWritesPrice?: number
@@ -514,4 +519,3 @@ export const deepSeekModels = {
// https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"


@@ -0,0 +1,14 @@
import { LanguageModelChatSelector } from 'vscode';
export const SELECTOR_SEPARATOR = '/';
export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
return [
selector.vendor,
selector.family,
selector.version,
selector.id
]
.filter(Boolean)
.join(SELECTOR_SEPARATOR);
}
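
Given the implementation above, unset selector fields are simply dropped from the joined string; for example:

```typescript
stringifyVsCodeLmModelSelector({ vendor: "copilot", family: "gpt-4" }); // "copilot/gpt-4"
stringifyVsCodeLmModelSelector({ family: "gpt-4" });                    // "gpt-4"
stringifyVsCodeLmModelSelector({});                                     // ""
```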

src/types/vscode.d.ts (new vendored file, 86 lines)

@@ -0,0 +1,86 @@
declare namespace vscode {
enum LanguageModelChatMessageRole {
User = 1,
Assistant = 2
}
enum LanguageModelChatToolMode {
Auto = 1,
Required = 2
}
interface LanguageModelChatSelector {
vendor?: string;
family?: string;
version?: string;
id?: string;
}
interface LanguageModelChatTool {
name: string;
description: string;
inputSchema?: object;
}
interface LanguageModelChatRequestOptions {
justification?: string;
modelOptions?: { [name: string]: any; };
tools?: LanguageModelChatTool[];
toolMode?: LanguageModelChatToolMode;
}
class LanguageModelTextPart {
value: string;
constructor(value: string);
}
class LanguageModelToolCallPart {
callId: string;
name: string;
input: object;
constructor(callId: string, name: string, input: object);
}
interface LanguageModelChatResponse {
stream: AsyncIterable<LanguageModelTextPart | LanguageModelToolCallPart | unknown>;
text: AsyncIterable<string>;
}
interface LanguageModelChat {
readonly name: string;
readonly id: string;
readonly vendor: string;
readonly family: string;
readonly version: string;
readonly maxInputTokens: number;
sendRequest(messages: LanguageModelChatMessage[], options?: LanguageModelChatRequestOptions, token?: CancellationToken): Thenable<LanguageModelChatResponse>;
countTokens(text: string | LanguageModelChatMessage, token?: CancellationToken): Thenable<number>;
}
class LanguageModelPromptTsxPart {
value: unknown;
constructor(value: unknown);
}
class LanguageModelToolResultPart {
callId: string;
content: Array<LanguageModelTextPart | LanguageModelPromptTsxPart | unknown>;
constructor(callId: string, content: Array<LanguageModelTextPart | LanguageModelPromptTsxPart | unknown>);
}
class LanguageModelChatMessage {
static User(content: string | Array<LanguageModelTextPart | LanguageModelToolResultPart>, name?: string): LanguageModelChatMessage;
static Assistant(content: string | Array<LanguageModelTextPart | LanguageModelToolCallPart>, name?: string): LanguageModelChatMessage;
role: LanguageModelChatMessageRole;
content: Array<LanguageModelTextPart | LanguageModelToolResultPart | LanguageModelToolCallPart>;
name: string | undefined;
constructor(role: LanguageModelChatMessageRole, content: string | Array<LanguageModelTextPart | LanguageModelToolResultPart | LanguageModelToolCallPart>, name?: string);
}
namespace lm {
function selectChatModels(selector?: LanguageModelChatSelector): Thenable<LanguageModelChat[]>;
}
}


@@ -49,6 +49,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
const { apiConfiguration, setApiConfiguration, uriScheme } = useExtensionState()
const [ollamaModels, setOllamaModels] = useState<string[]>([])
const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
const [vsCodeLmModels, setVsCodeLmModels] = useState<vscode.LanguageModelChatSelector[]>([])
const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
@@ -67,21 +68,24 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
} else if (selectedProvider === "lmstudio") {
vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
} else if (selectedProvider === "vscode-lm") {
vscode.postMessage({ type: "requestVsCodeLmModels" })
}
}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
useEffect(() => {
if (selectedProvider === "ollama" || selectedProvider === "lmstudio") {
if (selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm") {
requestLocalModels()
}
}, [selectedProvider, requestLocalModels])
useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null)
useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm" ? 2000 : null)
const handleMessage = useCallback((event: MessageEvent) => {
const message: ExtensionMessage = event.data
if (message.type === "ollamaModels" && message.ollamaModels) {
setOllamaModels(message.ollamaModels)
} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
setLmStudioModels(message.lmStudioModels)
} else if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
setVsCodeLmModels(message.vsCodeLmModels)
}
}, [])
useEvent("message", handleMessage)
@@ -139,6 +143,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
<VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption>
<VSCodeOption value="lmstudio">LM Studio</VSCodeOption>
<VSCodeOption value="ollama">Ollama</VSCodeOption>
<VSCodeOption value="vscode-lm">VS Code LM API</VSCodeOption>
</VSCodeDropdown>
</div>
@@ -591,6 +596,50 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
</div>
)}
{selectedProvider === "vscode-lm" && (
<div>
<div className="dropdown-container">
<label htmlFor="vscode-lm-model">
<span style={{ fontWeight: 500 }}>Language Model</span>
</label>
{vsCodeLmModels.length > 0 ? (
<VSCodeDropdown
id="vscode-lm-model"
value={apiConfiguration?.vsCodeLmModelSelector ?
`${apiConfiguration.vsCodeLmModelSelector.vendor ?? ""}/${apiConfiguration.vsCodeLmModelSelector.family ?? ""}` :
""}
onChange={(e) => {
const value = (e.target as HTMLInputElement).value;
const [vendor, family] = value.split('/');
setApiConfiguration({
...apiConfiguration,
vsCodeLmModelSelector: value ? { vendor, family } : undefined
});
}}
style={{ width: "100%" }}>
<VSCodeOption value="">Select a model...</VSCodeOption>
{vsCodeLmModels.map((model) => (
<VSCodeOption
key={`${model.vendor}/${model.family}`}
value={`${model.vendor}/${model.family}`}>
{model.vendor} - {model.family}
</VSCodeOption>
))}
</VSCodeDropdown>
) : (
<p style={{
fontSize: "12px",
marginTop: "5px",
color: "var(--vscode-descriptionForeground)",
}}>
No language models available.<br />
You can use any VS Code extension that provides language model capabilities.
</p>
)}
</div>
</div>
)}
{selectedProvider === "ollama" && (
<div>
<VSCodeTextField
@@ -896,6 +945,18 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
selectedModelId: apiConfiguration?.lmStudioModelId || "",
selectedModelInfo: openAiModelInfoSaneDefaults,
}
case "vscode-lm":
return {
selectedProvider: provider,
selectedModelId: apiConfiguration?.vsCodeLmModelSelector ?
`${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}` :
"",
selectedModelInfo: {
...openAiModelInfoSaneDefaults,
supportsImages: false, // VSCode LM API currently doesn't support images
supportsComputerUse: true // All VSCode LM models support tools
},
}
default:
return getProviderData(anthropicModels, anthropicDefaultModelId)
}
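
Tying the webview side together, a sketch of the model-list round trip using the message types added in this commit; the bare `addEventListener` wiring is illustrative, since the component above subscribes via `useEvent` instead:

```typescript
// Ask the extension host for the available VS Code LM models...
vscode.postMessage({ type: "requestVsCodeLmModels" });

// ...and pick the reply out of the webview message stream.
window.addEventListener("message", (event: MessageEvent) => {
    const message: ExtensionMessage = event.data;
    if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
        console.log("Available models:", message.vsCodeLmModels);
    }
});
```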

webview-ui/src/types/vscode.d.ts (new vendored file, 8 lines)

@@ -0,0 +1,8 @@
declare namespace vscode {
interface LanguageModelChatSelector {
vendor?: string;
family?: string;
version?: string;
id?: string;
}
}