Merge pull request #367 from RooVetGit/vscode-lm-provider

Add VSCode-LM as a provider
This commit is contained in:
Matt Rubens
2025-01-15 16:53:04 -05:00
committed by GitHub
18 changed files with 1562 additions and 8 deletions

View File

@@ -0,0 +1,5 @@
---
"roo-cline": patch
---
Experimental support for VS Code Language Models (thanks @RaySinner / @julesmons!)

View File

@@ -60,6 +60,7 @@ Give it a try and let us know what you think in the reddit: https://www.reddit.c
- Support for Glama
- Support for listing models from OpenAI-compatible providers
- Support for adding OpenAI-compatible models with or without streaming
- Experimental support for VS Code Language Models (e.g. Copilot)
- Per-tool MCP auto-approval
- Enable/disable individual MCP servers
- Enable/disable the MCP feature overall

View File

@@ -42,7 +42,10 @@
"ai",
"llama"
],
"activationEvents": [],
"activationEvents": [
"onLanguage",
"onStartupFinished"
],
"main": "./dist/extension.js",
"contributes": {
"viewsContainers": {
@@ -151,6 +154,20 @@
"git show"
],
"description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
},
"roo-cline.vsCodeLmModelSelector": {
"type": "object",
"properties": {
"vendor": {
"type": "string",
"description": "The vendor of the language model (e.g. copilot)"
},
"family": {
"type": "string",
"description": "The family of the language model (e.g. gpt-4)"
}
},
"description": "Settings for VSCode Language Model API"
}
}
}
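For reference, a minimal sketch of how the extension side could read this contribution at runtime; the function name here is illustrative, but `workspace.getConfiguration` and the `LanguageModelChatSelector` type are part of the stable VS Code API:

```typescript
import * as vscode from "vscode";

// Reads the selector contributed above from user/workspace settings.
// Returns undefined when the user has not configured a model yet.
function readVsCodeLmSelector(): vscode.LanguageModelChatSelector | undefined {
    const config = vscode.workspace.getConfiguration("roo-cline");
    return config.get<vscode.LanguageModelChatSelector>("vsCodeLmModelSelector");
}
```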

View File

@@ -11,6 +11,7 @@ import { LmStudioHandler } from "./providers/lmstudio"
import { GeminiHandler } from "./providers/gemini"
import { OpenAiNativeHandler } from "./providers/openai-native"
import { DeepSeekHandler } from "./providers/deepseek"
import { VsCodeLmHandler } from "./providers/vscode-lm"
import { ApiStream } from "./transform/stream"
export interface SingleCompletionHandler {

View File

@@ -0,0 +1,289 @@
import * as vscode from 'vscode';
import { VsCodeLmHandler } from '../vscode-lm';
import { ApiHandlerOptions } from '../../../shared/api';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock vscode namespace
jest.mock('vscode', () => {
class MockLanguageModelTextPart {
type = 'text';
constructor(public value: string) {}
}
class MockLanguageModelToolCallPart {
type = 'tool_call';
constructor(
public callId: string,
public name: string,
public input: any
) {}
}
return {
workspace: {
onDidChangeConfiguration: jest.fn((callback) => ({
dispose: jest.fn()
}))
},
CancellationTokenSource: jest.fn(() => ({
token: {
isCancellationRequested: false,
onCancellationRequested: jest.fn()
},
cancel: jest.fn(),
dispose: jest.fn()
})),
CancellationError: class CancellationError extends Error {
constructor() {
super('Operation cancelled');
this.name = 'CancellationError';
}
},
LanguageModelChatMessage: {
Assistant: jest.fn((content) => ({
role: 'assistant',
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
})),
User: jest.fn((content) => ({
role: 'user',
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
}))
},
LanguageModelTextPart: MockLanguageModelTextPart,
LanguageModelToolCallPart: MockLanguageModelToolCallPart,
lm: {
selectChatModels: jest.fn()
}
};
});
const mockLanguageModelChat = {
id: 'test-model',
name: 'Test Model',
vendor: 'test-vendor',
family: 'test-family',
version: '1.0',
maxInputTokens: 4096,
sendRequest: jest.fn(),
countTokens: jest.fn()
};
describe('VsCodeLmHandler', () => {
let handler: VsCodeLmHandler;
const defaultOptions: ApiHandlerOptions = {
vsCodeLmModelSelector: {
vendor: 'test-vendor',
family: 'test-family'
}
};
beforeEach(() => {
jest.clearAllMocks();
handler = new VsCodeLmHandler(defaultOptions);
});
afterEach(() => {
handler.dispose();
});
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeDefined();
expect(vscode.workspace.onDidChangeConfiguration).toHaveBeenCalled();
});
it('should handle configuration changes', () => {
const callback = (vscode.workspace.onDidChangeConfiguration as jest.Mock).mock.calls[0][0];
callback({ affectsConfiguration: () => true });
// Should reset client when config changes
expect(handler['client']).toBeNull();
});
});
describe('createClient', () => {
it('should create client with selector', async () => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
const client = await handler['createClient']({
vendor: 'test-vendor',
family: 'test-family'
});
expect(client).toBeDefined();
expect(client.id).toBe('test-model');
expect(vscode.lm.selectChatModels).toHaveBeenCalledWith({
vendor: 'test-vendor',
family: 'test-family'
});
});
it('should return default client when no models available', async () => {
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([]);
const client = await handler['createClient']({});
expect(client).toBeDefined();
expect(client.id).toBe('default-lm');
expect(client.vendor).toBe('vscode');
});
});
describe('createMessage', () => {
beforeEach(() => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
mockLanguageModelChat.countTokens.mockResolvedValue(10);
});
it('should stream text responses', async () => {
const systemPrompt = 'You are a helpful assistant';
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user' as const,
content: 'Hello'
}];
const responseText = 'Hello! How can I help you?';
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
stream: (async function* () {
yield new vscode.LanguageModelTextPart(responseText);
return;
})(),
text: (async function* () {
yield responseText;
return;
})()
});
const stream = handler.createMessage(systemPrompt, messages);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks).toHaveLength(2); // Text chunk + usage chunk
expect(chunks[0]).toEqual({
type: 'text',
text: responseText
});
expect(chunks[1]).toMatchObject({
type: 'usage',
inputTokens: expect.any(Number),
outputTokens: expect.any(Number)
});
});
it('should handle tool calls', async () => {
const systemPrompt = 'You are a helpful assistant';
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user' as const,
content: 'Calculate 2+2'
}];
const toolCallData = {
name: 'calculator',
arguments: { operation: 'add', numbers: [2, 2] },
callId: 'call-1'
};
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
stream: (async function* () {
yield new vscode.LanguageModelToolCallPart(
toolCallData.callId,
toolCallData.name,
toolCallData.arguments
);
return;
})(),
text: (async function* () {
yield JSON.stringify({ type: 'tool_call', ...toolCallData });
return;
})()
});
const stream = handler.createMessage(systemPrompt, messages);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks).toHaveLength(2); // Tool call chunk + usage chunk
expect(chunks[0]).toEqual({
type: 'text',
text: JSON.stringify({ type: 'tool_call', ...toolCallData })
});
});
it('should handle errors', async () => {
const systemPrompt = 'You are a helpful assistant';
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user' as const,
content: 'Hello'
}];
mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('API Error'));
await expect(async () => {
const stream = handler.createMessage(systemPrompt, messages);
for await (const _ of stream) {
// consume stream
}
}).rejects.toThrow('API Error');
});
});
describe('getModel', () => {
it('should return model info when client exists', async () => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
// Initialize client
await handler['getClient']();
const model = handler.getModel();
expect(model.id).toBe('test-model');
expect(model.info).toBeDefined();
expect(model.info.contextWindow).toBe(4096);
});
it('should return fallback model info when no client exists', () => {
const model = handler.getModel();
expect(model.id).toBe('test-vendor/test-family');
expect(model.info).toBeDefined();
});
});
describe('completePrompt', () => {
it('should complete single prompt', async () => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
const responseText = 'Completed text';
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
stream: (async function* () {
yield new vscode.LanguageModelTextPart(responseText);
return;
})(),
text: (async function* () {
yield responseText;
return;
})()
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe(responseText);
expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled();
});
it('should handle errors during completion', async () => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('Completion failed'));
await expect(handler.completePrompt('Test prompt'))
.rejects
.toThrow('VSCode LM completion error: Completion failed');
});
});
});

View File

@@ -0,0 +1,564 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { ApiHandler, SingleCompletionHandler } from "../";
import { calculateApiCost } from "../../utils/cost";
import { ApiStream } from "../transform/stream";
import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format";
import { SELECTOR_SEPARATOR, stringifyVsCodeLmModelSelector } from "../../shared/vsCodeSelectorUtils";
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api";
/**
* Handles interaction with VS Code's Language Model API for chat-based operations.
* This handler implements the ApiHandler interface to provide VS Code LM specific functionality.
*
* @implements {ApiHandler}
*
* @remarks
* The handler manages a VS Code language model chat client and provides methods to:
* - Create and manage chat client instances
* - Stream messages using VS Code's Language Model API
* - Retrieve model information
*
* @example
* ```typescript
* const options = {
* vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" }
* };
* const handler = new VsCodeLmHandler(options);
*
* // Stream a conversation
* const systemPrompt = "You are a helpful assistant";
* const messages = [{ role: "user", content: "Hello!" }];
* for await (const chunk of handler.createMessage(systemPrompt, messages)) {
* console.log(chunk);
* }
* ```
*/
export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions;
private client: vscode.LanguageModelChat | null;
private disposable: vscode.Disposable | null;
private currentRequestCancellation: vscode.CancellationTokenSource | null;
constructor(options: ApiHandlerOptions) {
this.options = options;
this.client = null;
this.disposable = null;
this.currentRequestCancellation = null;
try {
// Listen for model changes and reset client
this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
if (event.affectsConfiguration('lm')) {
try {
this.client = null;
this.ensureCleanState();
}
catch (error) {
console.error('Error during configuration change cleanup:', error);
}
}
});
}
catch (error) {
// Ensure cleanup if constructor fails
this.dispose();
throw new Error(
`Cline <Language Model API>: Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
);
}
}
/**
* Creates a language model chat client based on the provided selector.
*
* @param selector - Selector criteria to filter language model chat instances
* @returns Promise resolving to the first matching language model chat instance
* @throws Error when no matching models are found with the given selector
*
* @example
* const selector = { vendor: "copilot", family: "gpt-4o" };
* const chatClient = await createClient(selector);
*/
async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
try {
const models = await vscode.lm.selectChatModels(selector);
// Use first available model or create a minimal model object
if (models && Array.isArray(models) && models.length > 0) {
return models[0];
}
// Create a minimal model if no models are available
return {
id: 'default-lm',
name: 'Default Language Model',
vendor: 'vscode',
family: 'lm',
version: '1.0',
maxInputTokens: 8192,
sendRequest: async (messages, options, token) => {
// Provide a minimal implementation
return {
stream: (async function* () {
yield new vscode.LanguageModelTextPart(
"Language model functionality is limited. Please check VS Code configuration."
);
})(),
text: (async function* () {
yield "Language model functionality is limited. Please check VS Code configuration.";
})()
};
},
countTokens: async () => 0
};
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
}
}
/**
 * Disposes of the configuration-change listener and cancels any in-flight request.
 */
dispose(): void {
if (this.disposable) {
this.disposable.dispose();
}
if (this.currentRequestCancellation) {
this.currentRequestCancellation.cancel();
this.currentRequestCancellation.dispose();
}
}
private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
// Check for required dependencies
if (!this.client) {
console.warn('Cline <Language Model API>: No client available for token counting');
return 0;
}
if (!this.currentRequestCancellation) {
console.warn('Cline <Language Model API>: No cancellation token available for token counting');
return 0;
}
// Validate input
if (!text) {
console.debug('Cline <Language Model API>: Empty text provided for token counting');
return 0;
}
try {
// Handle different input types
let tokenCount: number;
if (typeof text === 'string') {
tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
} else if (text instanceof vscode.LanguageModelChatMessage) {
// For chat messages, ensure we have content
if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
console.debug('Cline <Language Model API>: Empty chat message content');
return 0;
}
tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
} else {
console.warn('Cline <Language Model API>: Invalid input type for token counting');
return 0;
}
// Validate the result
if (typeof tokenCount !== 'number') {
console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
return 0;
}
if (tokenCount < 0) {
console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
return 0;
}
return tokenCount;
}
catch (error) {
// Handle specific error types
if (error instanceof vscode.CancellationError) {
console.debug('Cline <Language Model API>: Token counting cancelled by user');
return 0;
}
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);
// Log additional error details if available
if (error instanceof Error && error.stack) {
console.debug('Token counting error stack:', error.stack);
}
return 0; // Fallback to prevent stream interruption
}
}
private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
const systemTokens: number = await this.countTokens(systemPrompt);
const messageTokens: number[] = await Promise.all(
vsCodeLmMessages.map(msg => this.countTokens(msg))
);
return systemTokens + messageTokens.reduce(
(sum: number, tokens: number): number => sum + tokens, 0
);
}
private ensureCleanState(): void {
if (this.currentRequestCancellation) {
this.currentRequestCancellation.cancel();
this.currentRequestCancellation.dispose();
this.currentRequestCancellation = null;
}
}
private async getClient(): Promise<vscode.LanguageModelChat> {
if (!this.client) {
console.debug('Cline <Language Model API>: Getting client with options:', {
vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
hasOptions: !!this.options,
selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
});
try {
// Use default empty selector if none provided to get all available models
const selector = this.options?.vsCodeLmModelSelector || {};
console.debug('Cline <Language Model API>: Creating client with selector:', selector);
this.client = await this.createClient(selector);
} catch (error) {
const message = error instanceof Error ? error.message : 'Unknown error';
console.error('Cline <Language Model API>: Client creation failed:', message);
throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
}
}
return this.client;
}
private cleanTerminalOutput(text: string): string {
if (!text) {
return '';
}
return text
// Normalize line endings
.replace(/\r\n/g, '\n')
.replace(/\r/g, '\n')
// Strip ANSI escape sequences
.replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
.replace(/\x9B[0-?]*[ -/]*[@-~]/g, '') // CSI sequences
// Strip terminal-title updates and other OSC sequences
.replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')
// Strip control characters
.replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')
// Strip VS Code escape sequences
.replace(/\x1B[PD].*?\x1B\\/g, '') // DCS sequences
.replace(/\x1B_.*?\x1B\\/g, '') // APC sequences
.replace(/\x1B\^.*?\x1B\\/g, '') // PM sequences
.replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen
// Strip Windows paths and shell housekeeping output
.replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
.replace(/^;?Cwd=.*$/mg, '')
// Strip escaped hex and unicode sequences
.replace(/\\x[0-9a-fA-F]{2}/g, '')
.replace(/\\u[0-9a-fA-F]{4}/g, '')
// Final cleanup
.replace(/\n{3,}/g, '\n\n') // Collapse runs of blank lines
.trim();
}
private cleanMessageContent(content: any): any {
if (!content) {
return content;
}
if (typeof content === 'string') {
return this.cleanTerminalOutput(content);
}
if (Array.isArray(content)) {
return content.map(item => this.cleanMessageContent(item));
}
if (typeof content === 'object') {
const cleaned: any = {};
for (const [key, value] of Object.entries(content)) {
cleaned[key] = this.cleanMessageContent(value);
}
return cleaned;
}
return content;
}
/**
 * Creates and streams a message using the VS Code Language Model API.
 *
 * @param systemPrompt - The system prompt to initialize the conversation context
 * @param messages - An array of message parameters following the Anthropic message format
 *
 * @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
 *
 * @throws {Error} When the response stream encounters an error
 *
 * @remarks
 * This method initializes the VS Code LM client if it has not been created yet,
 * converts the messages to VS Code LM format, and streams the response chunks.
 * Tool calls are currently serialized into text chunks; richer handling is a work in progress.
 */
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
// Ensure clean state before starting a new request
this.ensureCleanState();
const client: vscode.LanguageModelChat = await this.getClient();
// Clean system prompt and messages
const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
const cleanedMessages = messages.map(msg => ({
...msg,
content: this.cleanMessageContent(msg.content)
}));
// Convert Anthropic messages to VS Code LM messages
const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
...convertToVsCodeLmMessages(cleanedMessages),
];
// Initialize cancellation token for the request
this.currentRequestCancellation = new vscode.CancellationTokenSource();
// Calculate input tokens before starting the stream
const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);
// Accumulate the text and count at the end of the stream to reduce token counting overhead.
let accumulatedText: string = '';
try {
// Create the response stream with minimal required options
const requestOptions: vscode.LanguageModelChatRequestOptions = {
justification: `Cline would like to use '${client.name}' from '${client.vendor}'. Click 'Allow' to proceed.`
};
// Note: Tool support is currently provided by the VSCode Language Model API directly
// Extensions can register tools using vscode.lm.registerTool()
const response: vscode.LanguageModelChatResponse = await client.sendRequest(
vsCodeLmMessages,
requestOptions,
this.currentRequestCancellation.token
);
// Consume the stream and handle both text and tool call chunks
for await (const chunk of response.stream) {
if (chunk instanceof vscode.LanguageModelTextPart) {
// Validate text part value
if (typeof chunk.value !== 'string') {
console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
continue;
}
accumulatedText += chunk.value;
yield {
type: "text",
text: chunk.value,
};
} else if (chunk instanceof vscode.LanguageModelToolCallPart) {
try {
// Validate tool call parameters
if (!chunk.name || typeof chunk.name !== 'string') {
console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
continue;
}
if (!chunk.callId || typeof chunk.callId !== 'string') {
console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
continue;
}
// Ensure input is a valid object
if (!chunk.input || typeof chunk.input !== 'object') {
console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
continue;
}
// Convert tool calls to text format with proper error handling
const toolCall = {
type: "tool_call",
name: chunk.name,
arguments: chunk.input,
callId: chunk.callId
};
const toolCallText = JSON.stringify(toolCall);
accumulatedText += toolCallText;
// Log tool call for debugging
console.debug('Cline <Language Model API>: Processing tool call:', {
name: chunk.name,
callId: chunk.callId,
inputSize: JSON.stringify(chunk.input).length
});
yield {
type: "text",
text: toolCallText,
};
} catch (error) {
console.error('Cline <Language Model API>: Failed to process tool call:', error);
// Continue processing other chunks even if one fails
continue;
}
} else {
console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
}
}
// Count tokens in the accumulated text after stream completion
const totalOutputTokens: number = await this.countTokens(accumulatedText);
// Report final usage after stream completion
yield {
type: "usage",
inputTokens: totalInputTokens,
outputTokens: totalOutputTokens,
totalCost: calculateApiCost(
this.getModel().info,
totalInputTokens,
totalOutputTokens
)
};
}
catch (error: unknown) {
this.ensureCleanState();
if (error instanceof vscode.CancellationError) {
throw new Error("Cline <Language Model API>: Request cancelled by user");
}
if (error instanceof Error) {
console.error('Cline <Language Model API>: Stream error details:', {
message: error.message,
stack: error.stack,
name: error.name
});
// Return original error if it's already an Error instance
throw error;
} else if (typeof error === 'object' && error !== null) {
// Handle error-like objects
const errorDetails = JSON.stringify(error, null, 2);
console.error('Cline <Language Model API>: Stream error object:', errorDetails);
throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
} else {
// Fallback for unknown error types
const errorMessage = String(error);
console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
}
}
}
// Return model information based on the current client state
getModel(): { id: string; info: ModelInfo; } {
if (this.client) {
// Validate client properties
const requiredProps = {
id: this.client.id,
vendor: this.client.vendor,
family: this.client.family,
version: this.client.version,
maxInputTokens: this.client.maxInputTokens
};
// Log any missing properties for debugging
for (const [prop, value] of Object.entries(requiredProps)) {
if (!value && value !== 0) {
console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
}
}
// Construct model ID using available information
const modelParts = [
this.client.vendor,
this.client.family,
this.client.version
].filter(Boolean);
const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);
// Build model info with conservative defaults for missing values
const modelInfo: ModelInfo = {
maxTokens: -1, // Unlimited tokens by default
contextWindow: typeof this.client.maxInputTokens === 'number'
? Math.max(0, this.client.maxInputTokens)
: openAiModelInfoSaneDefaults.contextWindow,
supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
supportsPromptCache: true,
inputPrice: 0,
outputPrice: 0,
description: `VSCode Language Model: ${modelId}`
};
return { id: modelId, info: modelInfo };
}
// Fallback when no client is available
const fallbackId = this.options.vsCodeLmModelSelector
? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
: "vscode-lm";
console.debug('Cline <Language Model API>: No client available, using fallback model info');
return {
id: fallbackId,
info: {
...openAiModelInfoSaneDefaults,
description: `VSCode Language Model (Fallback): ${fallbackId}`
}
};
}
async completePrompt(prompt: string): Promise<string> {
try {
const client = await this.getClient();
const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
let result = "";
for await (const chunk of response.stream) {
if (chunk instanceof vscode.LanguageModelTextPart) {
result += chunk.value;
}
}
return result;
} catch (error) {
if (error instanceof Error) {
throw new Error(`VSCode LM completion error: ${error.message}`)
}
throw error
}
}
}
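To illustrate the streaming contract implemented above, here is a hedged sketch of a caller consuming the handler; it relies only on the chunk shapes yielded by `createMessage` ("text" and "usage"), and the selector values are placeholders:

```typescript
// Assumes VsCodeLmHandler is imported from this module.
async function demo(): Promise<string> {
    const handler = new VsCodeLmHandler({
        vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" },
    });
    let transcript = "";
    try {
        const stream = handler.createMessage("You are a helpful assistant", [
            { role: "user", content: "Hello!" },
        ]);
        for await (const chunk of stream) {
            if (chunk.type === "text") {
                // Streamed model output; tool calls arrive serialized as JSON text.
                transcript += chunk.text;
            } else if (chunk.type === "usage") {
                console.log(`tokens in/out: ${chunk.inputTokens}/${chunk.outputTokens}`);
            }
        }
    } finally {
        handler.dispose();
    }
    return transcript;
}
```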

View File

@@ -0,0 +1,246 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { convertToVsCodeLmMessages, convertToAnthropicRole, convertToAnthropicMessage } from '../vscode-lm-format';
// Mock crypto
const mockCrypto = {
randomUUID: () => 'test-uuid'
};
global.crypto = mockCrypto as any;
// Define types for our mocked classes
interface MockLanguageModelTextPart {
type: 'text';
value: string;
}
interface MockLanguageModelToolCallPart {
type: 'tool_call';
callId: string;
name: string;
input: any;
}
interface MockLanguageModelToolResultPart {
type: 'tool_result';
toolUseId: string;
parts: MockLanguageModelTextPart[];
}
type MockMessageContent = MockLanguageModelTextPart | MockLanguageModelToolCallPart | MockLanguageModelToolResultPart;
interface MockLanguageModelChatMessage {
role: string;
name?: string;
content: MockMessageContent[];
}
// Mock vscode namespace
jest.mock('vscode', () => {
const LanguageModelChatMessageRole = {
Assistant: 'assistant',
User: 'user'
};
class MockLanguageModelTextPart {
type = 'text';
constructor(public value: string) {}
}
class MockLanguageModelToolCallPart {
type = 'tool_call';
constructor(
public callId: string,
public name: string,
public input: any
) {}
}
class MockLanguageModelToolResultPart {
type = 'tool_result';
constructor(
public toolUseId: string,
public parts: MockLanguageModelTextPart[]
) {}
}
return {
LanguageModelChatMessage: {
Assistant: jest.fn((content) => ({
role: LanguageModelChatMessageRole.Assistant,
name: 'assistant',
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
})),
User: jest.fn((content) => ({
role: LanguageModelChatMessageRole.User,
name: 'user',
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
}))
},
LanguageModelChatMessageRole,
LanguageModelTextPart: MockLanguageModelTextPart,
LanguageModelToolCallPart: MockLanguageModelToolCallPart,
LanguageModelToolResultPart: MockLanguageModelToolResultPart
};
});
describe('vscode-lm-format', () => {
describe('convertToVsCodeLmMessages', () => {
it('should convert simple string messages', () => {
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' }
];
const result = convertToVsCodeLmMessages(messages);
expect(result).toHaveLength(2);
expect(result[0].role).toBe('user');
expect((result[0].content[0] as MockLanguageModelTextPart).value).toBe('Hello');
expect(result[1].role).toBe('assistant');
expect((result[1].content[0] as MockLanguageModelTextPart).value).toBe('Hi there');
});
it('should handle complex user messages with tool results', () => {
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user',
content: [
{ type: 'text', text: 'Here is the result:' },
{
type: 'tool_result',
tool_use_id: 'tool-1',
content: 'Tool output'
}
]
}];
const result = convertToVsCodeLmMessages(messages);
expect(result).toHaveLength(1);
expect(result[0].role).toBe('user');
expect(result[0].content).toHaveLength(2);
const [toolResult, textContent] = result[0].content as [MockLanguageModelToolResultPart, MockLanguageModelTextPart];
expect(toolResult.type).toBe('tool_result');
expect(textContent.type).toBe('text');
});
it('should handle complex assistant messages with tool calls', () => {
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'assistant',
content: [
{ type: 'text', text: 'Let me help you with that.' },
{
type: 'tool_use',
id: 'tool-1',
name: 'calculator',
input: { operation: 'add', numbers: [2, 2] }
}
]
}];
const result = convertToVsCodeLmMessages(messages);
expect(result).toHaveLength(1);
expect(result[0].role).toBe('assistant');
expect(result[0].content).toHaveLength(2);
const [toolCall, textContent] = result[0].content as [MockLanguageModelToolCallPart, MockLanguageModelTextPart];
expect(toolCall.type).toBe('tool_call');
expect(textContent.type).toBe('text');
});
it('should handle image blocks with appropriate placeholders', () => {
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user',
content: [
{ type: 'text', text: 'Look at this:' },
{
type: 'image',
source: {
type: 'base64',
media_type: 'image/png',
data: 'base64data'
}
}
]
}];
const result = convertToVsCodeLmMessages(messages);
expect(result).toHaveLength(1);
const imagePlaceholder = result[0].content[1] as MockLanguageModelTextPart;
expect(imagePlaceholder.value).toContain('[Image (base64): image/png not supported by VSCode LM API]');
});
});
describe('convertToAnthropicRole', () => {
it('should convert assistant role correctly', () => {
const result = convertToAnthropicRole('assistant' as any);
expect(result).toBe('assistant');
});
it('should convert user role correctly', () => {
const result = convertToAnthropicRole('user' as any);
expect(result).toBe('user');
});
it('should return null for unknown roles', () => {
const result = convertToAnthropicRole('unknown' as any);
expect(result).toBeNull();
});
});
describe('convertToAnthropicMessage', () => {
it('should convert assistant message with text content', async () => {
const vsCodeMessage = {
role: 'assistant',
name: 'assistant',
content: [new vscode.LanguageModelTextPart('Hello')]
};
const result = await convertToAnthropicMessage(vsCodeMessage as any);
expect(result.role).toBe('assistant');
expect(result.content).toHaveLength(1);
expect(result.content[0]).toEqual({
type: 'text',
text: 'Hello'
});
expect(result.id).toBe('test-uuid');
});
it('should convert assistant message with tool calls', async () => {
const vsCodeMessage = {
role: 'assistant',
name: 'assistant',
content: [new vscode.LanguageModelToolCallPart(
'call-1',
'calculator',
{ operation: 'add', numbers: [2, 2] }
)]
};
const result = await convertToAnthropicMessage(vsCodeMessage as any);
expect(result.content).toHaveLength(1);
expect(result.content[0]).toEqual({
type: 'tool_use',
id: 'call-1',
name: 'calculator',
input: { operation: 'add', numbers: [2, 2] }
});
expect(result.id).toBe('test-uuid');
});
it('should throw error for non-assistant messages', async () => {
const vsCodeMessage = {
role: 'user',
name: 'user',
content: [new vscode.LanguageModelTextPart('Hello')]
};
await expect(convertToAnthropicMessage(vsCodeMessage as any))
.rejects
.toThrow('Cline <Language Model API>: Only assistant messages are supported.');
});
});
});

View File

@@ -0,0 +1,209 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
/**
* Safely converts a value into a plain object.
*/
function asObjectSafe(value: any): object {
// Handle null/undefined
if (!value) {
return {};
}
try {
// Handle strings that might be JSON
if (typeof value === 'string') {
return JSON.parse(value);
}
// Handle pre-existing objects
if (typeof value === 'object') {
return Object.assign({}, value);
}
return {};
}
catch (error) {
console.warn('Cline <Language Model API>: Failed to parse object:', error);
return {};
}
}
export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];
for (const anthropicMessage of anthropicMessages) {
// Handle simple string messages
if (typeof anthropicMessage.content === "string") {
vsCodeLmMessages.push(
anthropicMessage.role === "assistant"
? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
: vscode.LanguageModelChatMessage.User(anthropicMessage.content)
);
continue;
}
// Handle complex message structures
switch (anthropicMessage.role) {
case "user": {
const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
toolMessages: Anthropic.ToolResultBlockParam[];
}>(
(acc, part) => {
if (part.type === "tool_result") {
acc.toolMessages.push(part);
}
else if (part.type === "text" || part.type === "image") {
acc.nonToolMessages.push(part);
}
return acc;
},
{ nonToolMessages: [], toolMessages: [] },
);
// Process tool messages first then non-tool messages
const contentParts = [
// Convert tool messages to ToolResultParts
...toolMessages.map((toolMessage) => {
// Process tool result content into TextParts
const toolContentParts: vscode.LanguageModelTextPart[] = (
typeof toolMessage.content === "string"
? [new vscode.LanguageModelTextPart(toolMessage.content)]
: (
toolMessage.content?.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart(
`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
);
}
return new vscode.LanguageModelTextPart(part.text);
})
?? [new vscode.LanguageModelTextPart("")]
)
);
return new vscode.LanguageModelToolResultPart(
toolMessage.tool_use_id,
toolContentParts
);
}),
// Convert non-tool messages to TextParts after tool messages
...nonToolMessages.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart(
`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
);
}
return new vscode.LanguageModelTextPart(part.text);
})
];
// Add single user message with all content parts
vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
break;
}
case "assistant": {
const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
toolMessages: Anthropic.ToolUseBlockParam[];
}>(
(acc, part) => {
if (part.type === "tool_use") {
acc.toolMessages.push(part);
}
else if (part.type === "text" || part.type === "image") {
acc.nonToolMessages.push(part);
}
return acc;
},
{ nonToolMessages: [], toolMessages: [] },
);
// Process tool messages first then non-tool messages
const contentParts = [
// Convert tool messages to ToolCallParts first
...toolMessages.map((toolMessage) =>
new vscode.LanguageModelToolCallPart(
toolMessage.id,
toolMessage.name,
asObjectSafe(toolMessage.input)
)
),
// Convert non-tool messages to TextParts after tool messages
...nonToolMessages.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
}
return new vscode.LanguageModelTextPart(part.text);
})
];
// Add the assistant message to the list of messages
vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
break;
}
}
}
return vsCodeLmMessages;
}
export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
switch (vsCodeLmMessageRole) {
case vscode.LanguageModelChatMessageRole.Assistant:
return "assistant";
case vscode.LanguageModelChatMessageRole.User:
return "user";
default:
return null;
}
}
export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
if (anthropicRole !== "assistant") {
throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
}
return {
id: crypto.randomUUID(),
type: "message",
model: "vscode-lm",
role: anthropicRole,
content: (
vsCodeLmMessage.content
.map((part): Anthropic.ContentBlock | null => {
if (part instanceof vscode.LanguageModelTextPart) {
return {
type: "text",
text: part.value
};
}
if (part instanceof vscode.LanguageModelToolCallPart) {
return {
type: "tool_use",
id: part.callId || crypto.randomUUID(),
name: part.name,
input: asObjectSafe(part.input)
};
}
return null;
})
.filter(
(part): part is Anthropic.ContentBlock => part !== null
)
),
stop_reason: null,
stop_sequence: null,
usage: {
input_tokens: 0,
output_tokens: 0,
}
};
}
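A short worked example of the conversion, using shapes taken from the unit tests above; note that tool blocks are emitted before text blocks within each converted message, and the system prompt is handled separately by the handler:

```typescript
import { Anthropic } from "@anthropic-ai/sdk";
// Assumes convertToVsCodeLmMessages is imported from this module.

const history: Anthropic.Messages.MessageParam[] = [
    { role: "user", content: "What is 2 + 2?" },
    {
        role: "assistant",
        content: [
            { type: "text", text: "Let me check." },
            { type: "tool_use", id: "call-1", name: "calculator", input: { a: 2, b: 2 } },
        ],
    },
    {
        role: "user",
        content: [{ type: "tool_result", tool_use_id: "call-1", content: "4" }],
    },
];

const lmMessages = convertToVsCodeLmMessages(history);
// lmMessages[1].content => [LanguageModelToolCallPart, LanguageModelTextPart]
// lmMessages[2].content => [LanguageModelToolResultPart]
```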

View File

@@ -93,6 +93,7 @@ type GlobalStateKey =
| "requestDelaySeconds"
| "currentApiConfigName"
| "listApiConfigMeta"
| "vsCodeLmModelSelector"
| "mode"
| "modeApiConfigs"
| "customPrompts"
@@ -571,6 +572,10 @@ export class ClineProvider implements vscode.WebviewViewProvider {
const lmStudioModels = await this.getLmStudioModels(message.text)
this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
break
case "requestVsCodeLmModels":
const vsCodeLmModels = await this.getVsCodeLmModels()
this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
break
case "refreshGlamaModels":
await this.refreshGlamaModels()
break
@@ -1114,6 +1119,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
} = apiConfiguration
await this.updateGlobalState("apiProvider", apiProvider)
await this.updateGlobalState("apiModelId", apiModelId)
@@ -1145,6 +1151,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("openRouterModelId", openRouterModelId)
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
if (this.cline) {
this.cline.api = buildApiHandler(apiConfiguration)
}
@@ -1215,6 +1222,17 @@ export class ClineProvider implements vscode.WebviewViewProvider {
}
}
// VSCode LM API
private async getVsCodeLmModels() {
try {
const models = await vscode.lm.selectChatModels({});
return models || [];
} catch (error) {
console.error('Error fetching VS Code LM models:', error);
return [];
}
}
// OpenAi
async getOpenAiModels(baseUrl?: string, apiKey?: string) {
@@ -1774,6 +1792,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
requestDelaySeconds,
currentApiConfigName,
listApiConfigMeta,
vsCodeLmModelSelector,
mode,
modeApiConfigs,
customPrompts,
@@ -1832,6 +1851,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
this.getGlobalState("requestDelaySeconds") as Promise<number | undefined>,
this.getGlobalState("currentApiConfigName") as Promise<string | undefined>,
this.getGlobalState("listApiConfigMeta") as Promise<ApiConfigMeta[] | undefined>,
this.getGlobalState("vsCodeLmModelSelector") as Promise<vscode.LanguageModelChatSelector | undefined>,
this.getGlobalState("mode") as Promise<Mode | undefined>,
this.getGlobalState("modeApiConfigs") as Promise<Record<Mode, string> | undefined>,
this.getGlobalState("customPrompts") as Promise<CustomPrompts | undefined>,
@@ -1884,6 +1904,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
},
lastShownAnnouncementId,
customInstructions,

View File

@@ -25,6 +25,9 @@ export interface ExtensionMessage {
| "enhancedPrompt"
| "commitSearchResults"
| "listApiConfig"
| "vsCodeLmModels"
| "vsCodeLmApiAvailable"
| "requestVsCodeLmModels"
| "updatePrompt"
| "systemPrompt"
text?: string
@@ -40,6 +43,7 @@ export interface ExtensionMessage {
images?: string[]
ollamaModels?: string[]
lmStudioModels?: string[]
vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
filePaths?: string[]
partialMessage?: ClineMessage
glamaModels?: Record<string, ModelInfo>

View File

@@ -61,9 +61,11 @@ export interface WebviewMessage {
| "terminalOutputLineLimit"
| "mcpEnabled"
| "searchCommits"
| "refreshGlamaModels"
| "alwaysApproveResubmit"
| "requestDelaySeconds"
| "setApiConfigPassword"
| "requestVsCodeLmModels"
| "mode"
| "updatePrompt"
| "updateEnhancedPrompt"

View File

@@ -0,0 +1,56 @@
import { checkExistKey } from '../checkExistApiConfig';
import { ApiConfiguration } from '../api';
describe('checkExistKey', () => {
it('should return false for undefined config', () => {
expect(checkExistKey(undefined)).toBe(false);
});
it('should return false for empty config', () => {
const config: ApiConfiguration = {};
expect(checkExistKey(config)).toBe(false);
});
it('should return true when one key is defined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key'
};
expect(checkExistKey(config)).toBe(true);
});
it('should return true when multiple keys are defined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key',
glamaApiKey: 'glama-key',
openRouterApiKey: 'openrouter-key'
};
expect(checkExistKey(config)).toBe(true);
});
it('should return true when only non-key fields are undefined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key',
apiProvider: undefined,
anthropicBaseUrl: undefined
};
expect(checkExistKey(config)).toBe(true);
});
it('should return false when all key fields are undefined', () => {
const config: ApiConfiguration = {
apiKey: undefined,
glamaApiKey: undefined,
openRouterApiKey: undefined,
awsRegion: undefined,
vertexProjectId: undefined,
openAiApiKey: undefined,
ollamaModelId: undefined,
lmStudioModelId: undefined,
geminiApiKey: undefined,
openAiNativeApiKey: undefined,
deepSeekApiKey: undefined,
vsCodeLmModelSelector: undefined
};
expect(checkExistKey(config)).toBe(false);
});
});

View File

@@ -0,0 +1,44 @@
import { stringifyVsCodeLmModelSelector, SELECTOR_SEPARATOR } from '../vsCodeSelectorUtils';
import { LanguageModelChatSelector } from 'vscode';
describe('vsCodeSelectorUtils', () => {
describe('stringifyVsCodeLmModelSelector', () => {
it('should join all defined selector properties with separator', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor',
family: 'test-family',
version: 'v1',
id: 'test-id'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor/test-family/v1/test-id');
});
it('should skip undefined properties', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor',
family: 'test-family'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor/test-family');
});
it('should handle empty selector', () => {
const selector: LanguageModelChatSelector = {};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('');
});
it('should handle selector with only one property', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor');
});
});
});

View File

@@ -1,3 +1,5 @@
import * as vscode from 'vscode';
export type ApiProvider =
| "anthropic"
| "glama"
@@ -10,11 +12,13 @@ export type ApiProvider =
| "gemini"
| "openai-native"
| "deepseek"
| "vscode-lm"
export interface ApiHandlerOptions {
apiModelId?: string
apiKey?: string // anthropic
anthropicBaseUrl?: string
vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
glamaModelId?: string
glamaModelInfo?: ModelInfo
glamaApiKey?: string
@@ -58,7 +62,7 @@ export type ApiConfiguration = ApiHandlerOptions & {
export interface ModelInfo {
maxTokens?: number
contextWindow?: number
contextWindow: number
supportsImages?: boolean
supportsComputerUse?: boolean
supportsPromptCache: boolean // this value is hardcoded for now

View File

@@ -13,7 +13,8 @@ export function checkExistKey(config: ApiConfiguration | undefined) {
config.lmStudioModelId,
config.geminiApiKey,
config.openAiNativeApiKey,
config.deepSeekApiKey
config.deepSeekApiKey,
config.vsCodeLmModelSelector,
].some((key) => key !== undefined)
: false;
}

View File

@@ -0,0 +1,14 @@
import { LanguageModelChatSelector } from 'vscode';
export const SELECTOR_SEPARATOR = '/';
export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
return [
selector.vendor,
selector.family,
selector.version,
selector.id
]
.filter(Boolean)
.join(SELECTOR_SEPARATOR);
}
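The webview dropdown added below reverses this mapping by splitting on the same separator, so the two stay in sync only for vendor/family pairs; a quick illustration (the split step mirrors the ApiOptions code, and is lossy if a field ever contains "/"):

```typescript
// Assumes stringifyVsCodeLmModelSelector and SELECTOR_SEPARATOR are imported
// from this module.
const id = stringifyVsCodeLmModelSelector({ vendor: "copilot", family: "gpt-4" });
// id === "copilot/gpt-4"

const [vendor, family] = id.split(SELECTOR_SEPARATOR);
// vendor === "copilot", family === "gpt-4"
```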

View File

@@ -34,6 +34,7 @@ import {
import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
import { useExtensionState } from "../../context/ExtensionStateContext"
import { vscode } from "../../utils/vscode"
import * as vscodemodels from "vscode"
import VSCodeButtonLink from "../common/VSCodeButtonLink"
import OpenRouterModelPicker, {
ModelDescriptionMarkdown,
@@ -51,6 +52,7 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
const { apiConfiguration, setApiConfiguration, uriScheme, onUpdateApiConfig } = useExtensionState()
const [ollamaModels, setOllamaModels] = useState<string[]>([])
const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
const [vsCodeLmModels, setVsCodeLmModels] = useState<vscodemodels.LanguageModelChatSelector[]>([])
const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
@@ -71,21 +73,24 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
} else if (selectedProvider === "lmstudio") {
vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
} else if (selectedProvider === "vscode-lm") {
vscode.postMessage({ type: "requestVsCodeLmModels" })
}
}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
useEffect(() => {
if (selectedProvider === "ollama" || selectedProvider === "lmstudio") {
if (selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm") {
requestLocalModels()
}
}, [selectedProvider, requestLocalModels])
useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null)
useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm" ? 2000 : null)
const handleMessage = useCallback((event: MessageEvent) => {
const message: ExtensionMessage = event.data
if (message.type === "ollamaModels" && message.ollamaModels) {
setOllamaModels(message.ollamaModels)
} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
setLmStudioModels(message.lmStudioModels)
} else if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
setVsCodeLmModels(message.vsCodeLmModels)
}
}, [])
useEvent("message", handleMessage)
@@ -142,6 +147,7 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
<VSCodeOption value="vertex">GCP Vertex AI</VSCodeOption>
<VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption>
<VSCodeOption value="glama">Glama</VSCodeOption>
<VSCodeOption value="vscode-lm">VS Code LM API</VSCodeOption>
<VSCodeOption value="lmstudio">LM Studio</VSCodeOption>
<VSCodeOption value="ollama">Ollama</VSCodeOption>
</VSCodeDropdown>
@@ -620,6 +626,60 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
</div>
)}
{selectedProvider === "vscode-lm" && (
<div>
<div className="dropdown-container">
<label htmlFor="vscode-lm-model">
<span style={{ fontWeight: 500 }}>Language Model</span>
</label>
{vsCodeLmModels.length > 0 ? (
<VSCodeDropdown
id="vscode-lm-model"
value={apiConfiguration?.vsCodeLmModelSelector ?
`${apiConfiguration.vsCodeLmModelSelector.vendor ?? ""}/${apiConfiguration.vsCodeLmModelSelector.family ?? ""}` :
""}
onChange={(e) => {
const value = (e.target as HTMLInputElement).value;
const [vendor, family] = value.split('/');
setApiConfiguration({
...apiConfiguration,
vsCodeLmModelSelector: value ? { vendor, family } : undefined
});
}}
style={{ width: "100%" }}>
<VSCodeOption value="">Select a model...</VSCodeOption>
{vsCodeLmModels.map((model) => (
<VSCodeOption
key={`${model.vendor}/${model.family}`}
value={`${model.vendor}/${model.family}`}>
{model.vendor} - {model.family}
</VSCodeOption>
))}
</VSCodeDropdown>
) : (
<p style={{
fontSize: "12px",
marginTop: "5px",
color: "var(--vscode-descriptionForeground)",
}}>
The VS Code Language Model API allows you to run models provided by other VS Code extensions (including but not limited to GitHub Copilot).
The easiest way to get started is to install the Copilot and Copilot Chat extensions from the VS Code Marketplace.
</p>
)}
<p
style={{
fontSize: "12px",
marginTop: "5px",
color: "var(--vscode-errorForeground)",
fontWeight: 500,
}}>
Note: This is a very experimental integration and may not work as expected. Please report any issues to the Roo-Cline GitHub repository.
</p>
</div>
</div>
)}
{selectedProvider === "ollama" && (
<div>
<VSCodeTextField
@@ -939,6 +999,17 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
selectedModelId: apiConfiguration?.lmStudioModelId || "",
selectedModelInfo: openAiModelInfoSaneDefaults,
}
case "vscode-lm":
return {
selectedProvider: provider,
selectedModelId: apiConfiguration?.vsCodeLmModelSelector ?
`${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}` :
"",
selectedModelInfo: {
...openAiModelInfoSaneDefaults,
supportsImages: false, // VSCode LM API currently doesn't support images
},
}
default:
return getProviderData(anthropicModels, anthropicDefaultModelId)
}

View File

@@ -57,6 +57,11 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s
return "You must provide a valid model ID."
}
break
case "vscode-lm":
if (!apiConfiguration.vsCodeLmModelSelector) {
return "You must provide a valid model selector."
}
break
}
}
return undefined
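A quick usage sketch of the new validation branch, based only on the case shown above; the selector values are placeholders:

```typescript
// Assumes validateApiConfiguration is imported from this module.

// With no selector configured, the new branch reports an error string:
validateApiConfiguration({ apiProvider: "vscode-lm" });
// => "You must provide a valid model selector."

// With a selector present, validation falls through and returns undefined:
validateApiConfiguration({
    apiProvider: "vscode-lm",
    vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" },
});
```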