diff --git a/package-lock.json b/package-lock.json
index dd78add..f0560b4 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -31,7 +31,7 @@
"isbinaryfile": "^5.0.2",
"mammoth": "^1.8.0",
"monaco-vscode-textmate-theme-converter": "^0.1.7",
- "openai": "^4.73.1",
+ "openai": "^4.78.1",
"os-name": "^6.0.0",
"p-wait-for": "^5.0.2",
"pdf-parse": "^1.1.1",
@@ -12546,9 +12546,9 @@
}
},
"node_modules/openai": {
- "version": "4.76.0",
- "resolved": "https://registry.npmjs.org/openai/-/openai-4.76.0.tgz",
- "integrity": "sha512-QBGIetjX1C9xDp5XGa/3mPnfKI9BgAe2xHQX6PmO98wuW9qQaurBaumcYptQWc9LHZZq7cH/Y1Rjnsr6uUDdVw==",
+ "version": "4.78.1",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-4.78.1.tgz",
+ "integrity": "sha512-drt0lHZBd2lMyORckOXFPQTmnGLWSLt8VK0W9BhOKWpMFBEoHMoz5gxMPmVq5icp+sOrsbMnsmZTVHUlKvD1Ow==",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
diff --git a/package.json b/package.json
index 8892e2a..38bd166 100644
--- a/package.json
+++ b/package.json
@@ -42,7 +42,10 @@
"ai",
"llama"
],
- "activationEvents": [],
+ "activationEvents": [
+ "onLanguage",
+ "onStartupFinished"
+ ],
"main": "./dist/extension.js",
"contributes": {
"viewsContainers": {
@@ -151,6 +154,20 @@
"git show"
],
"description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
+ },
+ "roo-cline.vsCodeLmModelSelector": {
+ "type": "object",
+ "properties": {
+ "vendor": {
+ "type": "string",
+ "description": "The vendor of the language model (e.g. copilot)"
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the language model (e.g. gpt-4)"
+ }
+ },
+ "description": "Settings for VSCode Language Model API"
}
}
}
@@ -227,7 +244,7 @@
"isbinaryfile": "^5.0.2",
"mammoth": "^1.8.0",
"monaco-vscode-textmate-theme-converter": "^0.1.7",
- "openai": "^4.73.1",
+ "openai": "^4.78.1",
"os-name": "^6.0.0",
"p-wait-for": "^5.0.2",
"pdf-parse": "^1.1.1",
diff --git a/src/api/index.ts b/src/api/index.ts
index 999b588..17b9d29 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -11,6 +11,7 @@ import { LmStudioHandler } from "./providers/lmstudio"
import { GeminiHandler } from "./providers/gemini"
import { OpenAiNativeHandler } from "./providers/openai-native"
import { DeepSeekHandler } from "./providers/deepseek"
+import { VsCodeLmHandler } from "./providers/vscode-lm"
import { ApiStream } from "./transform/stream"
export interface SingleCompletionHandler {
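The hunk ends before the factory body, so the actual wiring is not shown here; presumably `buildApiHandler` (invoked later from `ClineProvider`) gains a `"vscode-lm"` case alongside the existing providers. A hedged sketch of that dispatch, with the fallback case assumed:

```ts
// Sketch only: the diff truncates before the factory body. The new provider is
// presumably registered like this, next to the handlers imported above.
export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
	const { apiProvider, ...options } = configuration
	switch (apiProvider) {
		case "vscode-lm":
			return new VsCodeLmHandler(options)
		// ...existing provider cases (anthropic, glama, openrouter, ...) unchanged
		default:
			return new AnthropicHandler(options) // assumed fallback
	}
}
```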
diff --git a/src/api/providers/__tests__/openai-native.test.ts b/src/api/providers/__tests__/openai-native.test.ts
index fe40804..7b263b0 100644
--- a/src/api/providers/__tests__/openai-native.test.ts
+++ b/src/api/providers/__tests__/openai-native.test.ts
@@ -60,6 +60,13 @@ jest.mock('openai', () => {
describe('OpenAiNativeHandler', () => {
let handler: OpenAiNativeHandler;
let mockOptions: ApiHandlerOptions;
+ const systemPrompt = 'You are a helpful assistant.';
+ const messages: Anthropic.Messages.MessageParam[] = [
+ {
+ role: 'user',
+ content: 'Hello!'
+ }
+ ];
beforeEach(() => {
mockOptions = {
@@ -86,14 +93,6 @@ describe('OpenAiNativeHandler', () => {
});
describe('createMessage', () => {
- const systemPrompt = 'You are a helpful assistant.';
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: 'user',
- content: 'Hello!'
- }
- ];
-
it('should handle streaming responses', async () => {
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
@@ -109,15 +108,126 @@ describe('OpenAiNativeHandler', () => {
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
-
const stream = handler.createMessage(systemPrompt, messages);
-
await expect(async () => {
for await (const chunk of stream) {
// Should not reach here
}
}).rejects.toThrow('API Error');
});
+
+ it('should handle missing content in response for o1 model', async () => {
+ // Use o1 model which supports developer role
+ handler = new OpenAiNativeHandler({
+ ...mockOptions,
+ apiModelId: 'o1'
+ });
+
+ mockCreate.mockResolvedValueOnce({
+ choices: [{ message: { content: null } }],
+ usage: {
+ prompt_tokens: 0,
+ completion_tokens: 0,
+ total_tokens: 0
+ }
+ });
+
+ const generator = handler.createMessage(systemPrompt, messages);
+ const results = [];
+ for await (const result of generator) {
+ results.push(result);
+ }
+
+ expect(results).toEqual([
+ { type: 'text', text: '' },
+ { type: 'usage', inputTokens: 0, outputTokens: 0 }
+ ]);
+
+ // Verify developer role is used for system prompt with o1 model
+ expect(mockCreate).toHaveBeenCalledWith({
+ model: 'o1',
+ messages: [
+ { role: 'developer', content: systemPrompt },
+ { role: 'user', content: 'Hello!' }
+ ]
+ });
+ });
+ });
+
+ describe('streaming models', () => {
+ beforeEach(() => {
+ handler = new OpenAiNativeHandler({
+ ...mockOptions,
+ apiModelId: 'gpt-4o',
+ });
+ });
+
+ it('should handle streaming response', async () => {
+ const mockStream = [
+ { choices: [{ delta: { content: 'Hello' } }], usage: null },
+ { choices: [{ delta: { content: ' there' } }], usage: null },
+ { choices: [{ delta: { content: '!' } }], usage: { prompt_tokens: 10, completion_tokens: 5 } },
+ ];
+
+ mockCreate.mockResolvedValueOnce(
+ (async function* () {
+ for (const chunk of mockStream) {
+ yield chunk;
+ }
+ })()
+ );
+
+ const generator = handler.createMessage(systemPrompt, messages);
+ const results = [];
+ for await (const result of generator) {
+ results.push(result);
+ }
+
+ expect(results).toEqual([
+ { type: 'text', text: 'Hello' },
+ { type: 'text', text: ' there' },
+ { type: 'text', text: '!' },
+ { type: 'usage', inputTokens: 10, outputTokens: 5 },
+ ]);
+
+ expect(mockCreate).toHaveBeenCalledWith({
+ model: 'gpt-4o',
+ temperature: 0,
+ messages: [
+ { role: 'system', content: systemPrompt },
+ { role: 'user', content: 'Hello!' },
+ ],
+ stream: true,
+ stream_options: { include_usage: true },
+ });
+ });
+
+ it('should handle empty delta content', async () => {
+ const mockStream = [
+ { choices: [{ delta: {} }], usage: null },
+ { choices: [{ delta: { content: null } }], usage: null },
+ { choices: [{ delta: { content: 'Hello' } }], usage: { prompt_tokens: 10, completion_tokens: 5 } },
+ ];
+
+ mockCreate.mockResolvedValueOnce(
+ (async function* () {
+ for (const chunk of mockStream) {
+ yield chunk;
+ }
+ })()
+ );
+
+ const generator = handler.createMessage(systemPrompt, messages);
+ const results = [];
+ for await (const result of generator) {
+ results.push(result);
+ }
+
+ expect(results).toEqual([
+ { type: 'text', text: 'Hello' },
+ { type: 'usage', inputTokens: 10, outputTokens: 5 },
+ ]);
+ });
});
describe('completePrompt', () => {
@@ -206,4 +316,4 @@ describe('OpenAiNativeHandler', () => {
expect(modelInfo.info).toBeDefined();
});
});
-});
\ No newline at end of file
+});
diff --git a/src/api/providers/__tests__/vscode-lm.test.ts b/src/api/providers/__tests__/vscode-lm.test.ts
new file mode 100644
index 0000000..396f13f
--- /dev/null
+++ b/src/api/providers/__tests__/vscode-lm.test.ts
@@ -0,0 +1,289 @@
+import * as vscode from 'vscode';
+import { VsCodeLmHandler } from '../vscode-lm';
+import { ApiHandlerOptions } from '../../../shared/api';
+import { Anthropic } from '@anthropic-ai/sdk';
+
+// Mock vscode namespace
+jest.mock('vscode', () => {
+ class MockLanguageModelTextPart {
+ type = 'text';
+ constructor(public value: string) {}
+ }
+
+ class MockLanguageModelToolCallPart {
+ type = 'tool_call';
+ constructor(
+ public callId: string,
+ public name: string,
+ public input: any
+ ) {}
+ }
+
+ return {
+ workspace: {
+ onDidChangeConfiguration: jest.fn((callback) => ({
+ dispose: jest.fn()
+ }))
+ },
+ CancellationTokenSource: jest.fn(() => ({
+ token: {
+ isCancellationRequested: false,
+ onCancellationRequested: jest.fn()
+ },
+ cancel: jest.fn(),
+ dispose: jest.fn()
+ })),
+ CancellationError: class CancellationError extends Error {
+ constructor() {
+ super('Operation cancelled');
+ this.name = 'CancellationError';
+ }
+ },
+ LanguageModelChatMessage: {
+ Assistant: jest.fn((content) => ({
+ role: 'assistant',
+ content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
+ })),
+ User: jest.fn((content) => ({
+ role: 'user',
+ content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
+ }))
+ },
+ LanguageModelTextPart: MockLanguageModelTextPart,
+ LanguageModelToolCallPart: MockLanguageModelToolCallPart,
+ lm: {
+ selectChatModels: jest.fn()
+ }
+ };
+});
+
+const mockLanguageModelChat = {
+ id: 'test-model',
+ name: 'Test Model',
+ vendor: 'test-vendor',
+ family: 'test-family',
+ version: '1.0',
+ maxInputTokens: 4096,
+ sendRequest: jest.fn(),
+ countTokens: jest.fn()
+};
+
+describe('VsCodeLmHandler', () => {
+ let handler: VsCodeLmHandler;
+ const defaultOptions: ApiHandlerOptions = {
+ vsCodeLmModelSelector: {
+ vendor: 'test-vendor',
+ family: 'test-family'
+ }
+ };
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ handler = new VsCodeLmHandler(defaultOptions);
+ });
+
+ afterEach(() => {
+ handler.dispose();
+ });
+
+ describe('constructor', () => {
+ it('should initialize with provided options', () => {
+ expect(handler).toBeDefined();
+ expect(vscode.workspace.onDidChangeConfiguration).toHaveBeenCalled();
+ });
+
+ it('should handle configuration changes', () => {
+ const callback = (vscode.workspace.onDidChangeConfiguration as jest.Mock).mock.calls[0][0];
+ callback({ affectsConfiguration: () => true });
+ // Should reset client when config changes
+ expect(handler['client']).toBeNull();
+ });
+ });
+
+ describe('createClient', () => {
+ it('should create client with selector', async () => {
+ const mockModel = { ...mockLanguageModelChat };
+ (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
+
+ const client = await handler['createClient']({
+ vendor: 'test-vendor',
+ family: 'test-family'
+ });
+
+ expect(client).toBeDefined();
+ expect(client.id).toBe('test-model');
+ expect(vscode.lm.selectChatModels).toHaveBeenCalledWith({
+ vendor: 'test-vendor',
+ family: 'test-family'
+ });
+ });
+
+ it('should return default client when no models available', async () => {
+ (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([]);
+
+ const client = await handler['createClient']({});
+
+ expect(client).toBeDefined();
+ expect(client.id).toBe('default-lm');
+ expect(client.vendor).toBe('vscode');
+ });
+ });
+
+ describe('createMessage', () => {
+ beforeEach(() => {
+ const mockModel = { ...mockLanguageModelChat };
+ (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
+ mockLanguageModelChat.countTokens.mockResolvedValue(10);
+ });
+
+ it('should stream text responses', async () => {
+ const systemPrompt = 'You are a helpful assistant';
+ const messages: Anthropic.Messages.MessageParam[] = [{
+ role: 'user' as const,
+ content: 'Hello'
+ }];
+
+ const responseText = 'Hello! How can I help you?';
+ mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+ stream: (async function* () {
+ yield new vscode.LanguageModelTextPart(responseText);
+ return;
+ })(),
+ text: (async function* () {
+ yield responseText;
+ return;
+ })()
+ });
+
+ const stream = handler.createMessage(systemPrompt, messages);
+ const chunks = [];
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+
+ expect(chunks).toHaveLength(2); // Text chunk + usage chunk
+ expect(chunks[0]).toEqual({
+ type: 'text',
+ text: responseText
+ });
+ expect(chunks[1]).toMatchObject({
+ type: 'usage',
+ inputTokens: expect.any(Number),
+ outputTokens: expect.any(Number)
+ });
+ });
+
+ it('should handle tool calls', async () => {
+ const systemPrompt = 'You are a helpful assistant';
+ const messages: Anthropic.Messages.MessageParam[] = [{
+ role: 'user' as const,
+ content: 'Calculate 2+2'
+ }];
+
+ const toolCallData = {
+ name: 'calculator',
+ arguments: { operation: 'add', numbers: [2, 2] },
+ callId: 'call-1'
+ };
+
+ mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+ stream: (async function* () {
+ yield new vscode.LanguageModelToolCallPart(
+ toolCallData.callId,
+ toolCallData.name,
+ toolCallData.arguments
+ );
+ return;
+ })(),
+ text: (async function* () {
+ yield JSON.stringify({ type: 'tool_call', ...toolCallData });
+ return;
+ })()
+ });
+
+ const stream = handler.createMessage(systemPrompt, messages);
+ const chunks = [];
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+
+ expect(chunks).toHaveLength(2); // Tool call chunk + usage chunk
+ expect(chunks[0]).toEqual({
+ type: 'text',
+ text: JSON.stringify({ type: 'tool_call', ...toolCallData })
+ });
+ });
+
+ it('should handle errors', async () => {
+ const systemPrompt = 'You are a helpful assistant';
+ const messages: Anthropic.Messages.MessageParam[] = [{
+ role: 'user' as const,
+ content: 'Hello'
+ }];
+
+ mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('API Error'));
+
+ await expect(async () => {
+ const stream = handler.createMessage(systemPrompt, messages);
+ for await (const _ of stream) {
+ // consume stream
+ }
+ }).rejects.toThrow('API Error');
+ });
+ });
+
+ describe('getModel', () => {
+ it('should return model info when client exists', async () => {
+ const mockModel = { ...mockLanguageModelChat };
+ (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
+
+ // Initialize client
+ await handler['getClient']();
+
+ const model = handler.getModel();
+ expect(model.id).toBe('test-model');
+ expect(model.info).toBeDefined();
+ expect(model.info.contextWindow).toBe(4096);
+ });
+
+ it('should return fallback model info when no client exists', () => {
+ const model = handler.getModel();
+ expect(model.id).toBe('test-vendor/test-family');
+ expect(model.info).toBeDefined();
+ });
+ });
+
+ describe('completePrompt', () => {
+ it('should complete single prompt', async () => {
+ const mockModel = { ...mockLanguageModelChat };
+ (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
+
+ const responseText = 'Completed text';
+ mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+ stream: (async function* () {
+ yield new vscode.LanguageModelTextPart(responseText);
+ return;
+ })(),
+ text: (async function* () {
+ yield responseText;
+ return;
+ })()
+ });
+
+ const result = await handler.completePrompt('Test prompt');
+ expect(result).toBe(responseText);
+ expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled();
+ });
+
+ it('should handle errors during completion', async () => {
+ const mockModel = { ...mockLanguageModelChat };
+ (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
+
+ mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('Completion failed'));
+
+ await expect(handler.completePrompt('Test prompt'))
+ .rejects
+ .toThrow('VSCode LM completion error: Completion failed');
+ });
+ });
+});
\ No newline at end of file
diff --git a/src/api/providers/openai-native.ts b/src/api/providers/openai-native.ts
index 83644c9..fa27eb3 100644
--- a/src/api/providers/openai-native.ts
+++ b/src/api/providers/openai-native.ts
@@ -23,14 +23,16 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
}
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
- switch (this.getModel().id) {
+ const modelId = this.getModel().id
+ switch (modelId) {
case "o1":
case "o1-preview":
case "o1-mini": {
- // o1 doesnt support streaming, non-1 temp, or system prompt
+ // o1-preview and o1-mini don't support streaming, non-1 temp, or system prompt
+ // o1 doesn't support streaming or a non-1 temp, but it does support a developer prompt
const response = await this.client.chat.completions.create({
- model: this.getModel().id,
- messages: [{ role: "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+ model: modelId,
+ messages: [{ role: modelId === "o1" ? "developer" : "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
})
yield {
type: "text",
@@ -93,7 +95,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
case "o1":
case "o1-preview":
case "o1-mini":
- // o1 doesn't support non-1 temp or system prompt
+ // o1 doesn't support non-1 temp
requestOptions = {
model: modelId,
messages: [{ role: "user", content: prompt }]
diff --git a/src/api/providers/vscode-lm.ts b/src/api/providers/vscode-lm.ts
new file mode 100644
index 0000000..bde2d6a
--- /dev/null
+++ b/src/api/providers/vscode-lm.ts
@@ -0,0 +1,564 @@
+import { Anthropic } from "@anthropic-ai/sdk";
+import * as vscode from 'vscode';
+import { ApiHandler, SingleCompletionHandler } from "../";
+import { calculateApiCost } from "../../utils/cost";
+import { ApiStream } from "../transform/stream";
+import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format";
+import { SELECTOR_SEPARATOR, stringifyVsCodeLmModelSelector } from "../../shared/vsCodeSelectorUtils";
+import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api";
+
+/**
+ * Handles interaction with VS Code's Language Model API for chat-based operations.
+ * This handler implements the ApiHandler interface to provide VS Code LM specific functionality.
+ *
+ * @implements {ApiHandler}
+ *
+ * @remarks
+ * The handler manages a VS Code language model chat client and provides methods to:
+ * - Create and manage chat client instances
+ * - Stream messages using VS Code's Language Model API
+ * - Retrieve model information
+ *
+ * @example
+ * ```typescript
+ * const options = {
+ * vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" }
+ * };
+ * const handler = new VsCodeLmHandler(options);
+ *
+ * // Stream a conversation
+ * const systemPrompt = "You are a helpful assistant";
+ * const messages = [{ role: "user", content: "Hello!" }];
+ * for await (const chunk of handler.createMessage(systemPrompt, messages)) {
+ * console.log(chunk);
+ * }
+ * ```
+ */
+export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {
+
+ private options: ApiHandlerOptions;
+ private client: vscode.LanguageModelChat | null;
+ private disposable: vscode.Disposable | null;
+ private currentRequestCancellation: vscode.CancellationTokenSource | null;
+
+ constructor(options: ApiHandlerOptions) {
+ this.options = options;
+ this.client = null;
+ this.disposable = null;
+ this.currentRequestCancellation = null;
+
+ try {
+ // Listen for model changes and reset client
+ this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
+ if (event.affectsConfiguration('lm')) {
+ try {
+ this.client = null;
+ this.ensureCleanState();
+ }
+ catch (error) {
+ console.error('Error during configuration change cleanup:', error);
+ }
+ }
+ });
+ }
+ catch (error) {
+ // Ensure cleanup if constructor fails
+ this.dispose();
+
+ throw new Error(
+ `Cline : Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
+ );
+ }
+ }
+
+ /**
+ * Creates a language model chat client based on the provided selector.
+ *
+ * @param selector - Selector criteria to filter language model chat instances
+ * @returns Promise resolving to the first matching language model chat instance
+ * @throws Error when no matching models are found with the given selector
+ *
+ * @example
+ * const selector = { vendor: "copilot", family: "gpt-4o" };
+ * const chatClient = await createClient(selector);
+ */
+ async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
+ try {
+ const models = await vscode.lm.selectChatModels(selector);
+
+ // Use first available model or create a minimal model object
+ if (models && Array.isArray(models) && models.length > 0) {
+ return models[0];
+ }
+
+ // Create a minimal model if no models are available
+ return {
+ id: 'default-lm',
+ name: 'Default Language Model',
+ vendor: 'vscode',
+ family: 'lm',
+ version: '1.0',
+ maxInputTokens: 8192,
+ sendRequest: async (messages, options, token) => {
+ // Provide a minimal implementation
+ return {
+ stream: (async function* () {
+ yield new vscode.LanguageModelTextPart(
+ "Language model functionality is limited. Please check VS Code configuration."
+ );
+ })(),
+ text: (async function* () {
+ yield "Language model functionality is limited. Please check VS Code configuration.";
+ })()
+ };
+ },
+ countTokens: async () => 0
+ };
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+ throw new Error(`Cline : Failed to select model: ${errorMessage}`);
+ }
+ }
+
+ dispose(): void {
+
+ if (this.disposable) {
+
+ this.disposable.dispose();
+ }
+
+ if (this.currentRequestCancellation) {
+
+ this.currentRequestCancellation.cancel();
+ this.currentRequestCancellation.dispose();
+ }
+ }
+
+ private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
+ // Check for required dependencies
+ if (!this.client) {
+ console.warn('Cline : No client available for token counting');
+ return 0;
+ }
+
+ if (!this.currentRequestCancellation) {
+ console.warn('Cline : No cancellation token available for token counting');
+ return 0;
+ }
+
+ // Validate input
+ if (!text) {
+ console.debug('Cline : Empty text provided for token counting');
+ return 0;
+ }
+
+ try {
+ // Handle different input types
+ let tokenCount: number;
+
+ if (typeof text === 'string') {
+ tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
+ } else if (text instanceof vscode.LanguageModelChatMessage) {
+ // For chat messages, ensure we have content
+ if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
+ console.debug('Cline : Empty chat message content');
+ return 0;
+ }
+ tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
+ } else {
+ console.warn('Cline : Invalid input type for token counting');
+ return 0;
+ }
+
+ // Validate the result
+ if (typeof tokenCount !== 'number') {
+ console.warn('Cline : Non-numeric token count received:', tokenCount);
+ return 0;
+ }
+
+ if (tokenCount < 0) {
+ console.warn('Cline : Negative token count received:', tokenCount);
+ return 0;
+ }
+
+ return tokenCount;
+ }
+ catch (error) {
+ // Handle specific error types
+ if (error instanceof vscode.CancellationError) {
+ console.debug('Cline : Token counting cancelled by user');
+ return 0;
+ }
+
+ const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+ console.warn('Cline : Token counting failed:', errorMessage);
+
+ // Log additional error details if available
+ if (error instanceof Error && error.stack) {
+ console.debug('Token counting error stack:', error.stack);
+ }
+
+ return 0; // Fallback to prevent stream interruption
+ }
+ }
+
+ private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
+
+ const systemTokens: number = await this.countTokens(systemPrompt);
+
+ const messageTokens: number[] = await Promise.all(
+ vsCodeLmMessages.map(msg => this.countTokens(msg))
+ );
+
+ return systemTokens + messageTokens.reduce(
+ (sum: number, tokens: number): number => sum + tokens, 0
+ );
+ }
+
+ private ensureCleanState(): void {
+
+ if (this.currentRequestCancellation) {
+
+ this.currentRequestCancellation.cancel();
+ this.currentRequestCancellation.dispose();
+ this.currentRequestCancellation = null;
+ }
+ }
+
+ private async getClient(): Promise<vscode.LanguageModelChat> {
+ if (!this.client) {
+ console.debug('Cline : Getting client with options:', {
+ vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
+ hasOptions: !!this.options,
+ selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
+ });
+
+ try {
+ // Use default empty selector if none provided to get all available models
+ const selector = this.options?.vsCodeLmModelSelector || {};
+ console.debug('Cline : Creating client with selector:', selector);
+ this.client = await this.createClient(selector);
+ } catch (error) {
+ const message = error instanceof Error ? error.message : 'Unknown error';
+ console.error('Cline : Client creation failed:', message);
+ throw new Error(`Cline : Failed to create client: ${message}`);
+ }
+ }
+
+ return this.client;
+ }
+
+ private cleanTerminalOutput(text: string): string {
+ if (!text) {
+ return '';
+ }
+
+ return text
+ // Normalize line endings
+ .replace(/\r\n/g, '\n')
+ .replace(/\r/g, '\n')
+
+ // Remove ANSI escape sequences
+ .replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
+ .replace(/\x9B[0-?]*[ -/]*[@-~]/g, '') // CSI sequences
+
+ // Remove terminal title-setting and other OSC sequences
+ .replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')
+
+ // Remove control characters
+ .replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')
+
+ // Remove VS Code escape sequences
+ .replace(/\x1B[PD].*?\x1B\\/g, '') // DCS sequences
+ .replace(/\x1B_.*?\x1B\\/g, '') // APC sequences
+ .replace(/\x1B\^.*?\x1B\\/g, '') // PM sequences
+ .replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen
+
+ // Remove Windows paths and shell metadata
+ .replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
+ .replace(/^;?Cwd=.*$/mg, '')
+
+ // Clean up escaped sequences
+ .replace(/\\x[0-9a-fA-F]{2}/g, '')
+ .replace(/\\u[0-9a-fA-F]{4}/g, '')
+
+ // Final cleanup
+ .replace(/\n{3,}/g, '\n\n') // Collapse runs of blank lines
+ .trim();
+ }
+
+ private cleanMessageContent(content: any): any {
+ if (!content) {
+ return content;
+ }
+
+ if (typeof content === 'string') {
+ return this.cleanTerminalOutput(content);
+ }
+
+ if (Array.isArray(content)) {
+ return content.map(item => this.cleanMessageContent(item));
+ }
+
+ if (typeof content === 'object') {
+ const cleaned: any = {};
+ for (const [key, value] of Object.entries(content)) {
+ cleaned[key] = this.cleanMessageContent(value);
+ }
+ return cleaned;
+ }
+
+ return content;
+ }
+
+ /**
+ * Creates and streams a message using the VS Code Language Model API.
+ *
+ * @param systemPrompt - The system prompt to initialize the conversation context
+ * @param messages - An array of message parameters following the Anthropic message format
+ *
+ * @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
+ *
+ * @throws {Error} When the client cannot be created from the vsCodeLmModelSelector option
+ * @throws {Error} When the response stream encounters an error
+ *
+ * @remarks
+ * This method handles the initialization of the VS Code LM client if not already created,
+ * converts the messages to VS Code LM format, and streams the response chunks.
+ * Tool calls handling is currently a work in progress.
+ */
+ async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+
+ // Ensure clean state before starting a new request
+ this.ensureCleanState();
+ const client: vscode.LanguageModelChat = await this.getClient();
+
+ // Clean system prompt and messages
+ const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
+ const cleanedMessages = messages.map(msg => ({
+ ...msg,
+ content: this.cleanMessageContent(msg.content)
+ }));
+
+ // Convert Anthropic messages to VS Code LM messages
+ const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
+ vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
+ ...convertToVsCodeLmMessages(cleanedMessages),
+ ];
+
+ // Initialize cancellation token for the request
+ this.currentRequestCancellation = new vscode.CancellationTokenSource();
+
+ // Calculate input tokens before starting the stream
+ const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);
+
+ // Accumulate the text and count at the end of the stream to reduce token counting overhead.
+ let accumulatedText: string = '';
+
+ try {
+
+ // Create the response stream with minimal required options
+ const requestOptions: vscode.LanguageModelChatRequestOptions = {
+ justification: `Cline would like to use '${client.name}' from '${client.vendor}'. Click 'Allow' to proceed.`
+ };
+
+ // Note: Tool support is currently provided by the VSCode Language Model API directly
+ // Extensions can register tools using vscode.lm.registerTool()
+
+ const response: vscode.LanguageModelChatResponse = await client.sendRequest(
+ vsCodeLmMessages,
+ requestOptions,
+ this.currentRequestCancellation.token
+ );
+
+ // Consume the stream and handle both text and tool call chunks
+ for await (const chunk of response.stream) {
+ if (chunk instanceof vscode.LanguageModelTextPart) {
+ // Validate text part value
+ if (typeof chunk.value !== 'string') {
+ console.warn('Cline : Invalid text part value received:', chunk.value);
+ continue;
+ }
+
+ accumulatedText += chunk.value;
+ yield {
+ type: "text",
+ text: chunk.value,
+ };
+ } else if (chunk instanceof vscode.LanguageModelToolCallPart) {
+ try {
+ // Validate tool call parameters
+ if (!chunk.name || typeof chunk.name !== 'string') {
+ console.warn('Cline : Invalid tool name received:', chunk.name);
+ continue;
+ }
+
+ if (!chunk.callId || typeof chunk.callId !== 'string') {
+ console.warn('Cline : Invalid tool callId received:', chunk.callId);
+ continue;
+ }
+
+ // Ensure input is a valid object
+ if (!chunk.input || typeof chunk.input !== 'object') {
+ console.warn('Cline : Invalid tool input received:', chunk.input);
+ continue;
+ }
+
+ // Convert tool calls to text format with proper error handling
+ const toolCall = {
+ type: "tool_call",
+ name: chunk.name,
+ arguments: chunk.input,
+ callId: chunk.callId
+ };
+
+ const toolCallText = JSON.stringify(toolCall);
+ accumulatedText += toolCallText;
+
+ // Log tool call for debugging
+ console.debug('Cline : Processing tool call:', {
+ name: chunk.name,
+ callId: chunk.callId,
+ inputSize: JSON.stringify(chunk.input).length
+ });
+
+ yield {
+ type: "text",
+ text: toolCallText,
+ };
+ } catch (error) {
+ console.error('Cline : Failed to process tool call:', error);
+ // Continue processing other chunks even if one fails
+ continue;
+ }
+ } else {
+ console.warn('Cline : Unknown chunk type received:', chunk);
+ }
+ }
+
+ // Count tokens in the accumulated text after stream completion
+ const totalOutputTokens: number = await this.countTokens(accumulatedText);
+
+ // Report final usage after stream completion
+ yield {
+ type: "usage",
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ totalCost: calculateApiCost(
+ this.getModel().info,
+ totalInputTokens,
+ totalOutputTokens
+ )
+ };
+ }
+ catch (error: unknown) {
+
+ this.ensureCleanState();
+
+ if (error instanceof vscode.CancellationError) {
+
+ throw new Error("Cline : Request cancelled by user");
+ }
+
+ if (error instanceof Error) {
+ console.error('Cline : Stream error details:', {
+ message: error.message,
+ stack: error.stack,
+ name: error.name
+ });
+
+ // Return original error if it's already an Error instance
+ throw error;
+ } else if (typeof error === 'object' && error !== null) {
+ // Handle error-like objects
+ const errorDetails = JSON.stringify(error, null, 2);
+ console.error('Cline : Stream error object:', errorDetails);
+ throw new Error(`Cline : Response stream error: ${errorDetails}`);
+ } else {
+ // Fallback for unknown error types
+ const errorMessage = String(error);
+ console.error('Cline : Unknown stream error:', errorMessage);
+ throw new Error(`Cline : Response stream error: ${errorMessage}`);
+ }
+ }
+ }
+
+ // Return model information based on the current client state
+ getModel(): { id: string; info: ModelInfo; } {
+ if (this.client) {
+ // Validate client properties
+ const requiredProps = {
+ id: this.client.id,
+ vendor: this.client.vendor,
+ family: this.client.family,
+ version: this.client.version,
+ maxInputTokens: this.client.maxInputTokens
+ };
+
+ // Log any missing properties for debugging
+ for (const [prop, value] of Object.entries(requiredProps)) {
+ if (!value && value !== 0) {
+ console.warn(`Cline : Client missing ${prop} property`);
+ }
+ }
+
+ // Construct model ID using available information
+ const modelParts = [
+ this.client.vendor,
+ this.client.family,
+ this.client.version
+ ].filter(Boolean);
+
+ const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);
+
+ // Build model info with conservative defaults for missing values
+ const modelInfo: ModelInfo = {
+ maxTokens: -1, // Unlimited tokens by default
+ contextWindow: typeof this.client.maxInputTokens === 'number'
+ ? Math.max(0, this.client.maxInputTokens)
+ : openAiModelInfoSaneDefaults.contextWindow,
+ supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
+ supportsPromptCache: true,
+ inputPrice: 0,
+ outputPrice: 0,
+ description: `VSCode Language Model: ${modelId}`
+ };
+
+ return { id: modelId, info: modelInfo };
+ }
+
+ // Fallback when no client is available
+ const fallbackId = this.options.vsCodeLmModelSelector
+ ? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
+ : "vscode-lm";
+
+ console.debug('Cline : No client available, using fallback model info');
+
+ return {
+ id: fallbackId,
+ info: {
+ ...openAiModelInfoSaneDefaults,
+ description: `VSCode Language Model (Fallback): ${fallbackId}`
+ }
+ };
+ }
+
+ async completePrompt(prompt: string): Promise<string> {
+ try {
+ const client = await this.getClient();
+ const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
+ let result = "";
+ for await (const chunk of response.stream) {
+ if (chunk instanceof vscode.LanguageModelTextPart) {
+ result += chunk.value;
+ }
+ }
+ return result;
+ } catch (error) {
+ if (error instanceof Error) {
+ throw new Error(`VSCode LM completion error: ${error.message}`)
+ }
+ throw error
+ }
+ }
+}
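A minimal usage sketch for the handler as a whole, assuming a Copilot-backed model is available; the selector values and the surrounding function are illustrative:

```ts
import { VsCodeLmHandler } from "./vscode-lm"

// Hypothetical helper: complete a one-off prompt and clean up afterwards.
async function completeOnce(prompt: string): Promise<string> {
	const handler = new VsCodeLmHandler({
		vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4o" },
	})
	try {
		// completePrompt drains the response stream and returns the joined text.
		return await handler.completePrompt(prompt)
	} finally {
		handler.dispose() // Releases the config listener and any pending cancellation.
	}
}
```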
diff --git a/src/api/transform/__tests__/vscode-lm-format.test.ts b/src/api/transform/__tests__/vscode-lm-format.test.ts
new file mode 100644
index 0000000..eb71578
--- /dev/null
+++ b/src/api/transform/__tests__/vscode-lm-format.test.ts
@@ -0,0 +1,246 @@
+import { Anthropic } from "@anthropic-ai/sdk";
+import * as vscode from 'vscode';
+import { convertToVsCodeLmMessages, convertToAnthropicRole, convertToAnthropicMessage } from '../vscode-lm-format';
+
+// Mock crypto
+const mockCrypto = {
+ randomUUID: () => 'test-uuid'
+};
+global.crypto = mockCrypto as any;
+
+// Define types for our mocked classes
+interface MockLanguageModelTextPart {
+ type: 'text';
+ value: string;
+}
+
+interface MockLanguageModelToolCallPart {
+ type: 'tool_call';
+ callId: string;
+ name: string;
+ input: any;
+}
+
+interface MockLanguageModelToolResultPart {
+ type: 'tool_result';
+ toolUseId: string;
+ parts: MockLanguageModelTextPart[];
+}
+
+type MockMessageContent = MockLanguageModelTextPart | MockLanguageModelToolCallPart | MockLanguageModelToolResultPart;
+
+interface MockLanguageModelChatMessage {
+ role: string;
+ name?: string;
+ content: MockMessageContent[];
+}
+
+// Mock vscode namespace
+jest.mock('vscode', () => {
+ const LanguageModelChatMessageRole = {
+ Assistant: 'assistant',
+ User: 'user'
+ };
+
+ class MockLanguageModelTextPart {
+ type = 'text';
+ constructor(public value: string) {}
+ }
+
+ class MockLanguageModelToolCallPart {
+ type = 'tool_call';
+ constructor(
+ public callId: string,
+ public name: string,
+ public input: any
+ ) {}
+ }
+
+ class MockLanguageModelToolResultPart {
+ type = 'tool_result';
+ constructor(
+ public toolUseId: string,
+ public parts: MockLanguageModelTextPart[]
+ ) {}
+ }
+
+ return {
+ LanguageModelChatMessage: {
+ Assistant: jest.fn((content) => ({
+ role: LanguageModelChatMessageRole.Assistant,
+ name: 'assistant',
+ content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
+ })),
+ User: jest.fn((content) => ({
+ role: LanguageModelChatMessageRole.User,
+ name: 'user',
+ content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
+ }))
+ },
+ LanguageModelChatMessageRole,
+ LanguageModelTextPart: MockLanguageModelTextPart,
+ LanguageModelToolCallPart: MockLanguageModelToolCallPart,
+ LanguageModelToolResultPart: MockLanguageModelToolResultPart
+ };
+});
+
+describe('vscode-lm-format', () => {
+ describe('convertToVsCodeLmMessages', () => {
+ it('should convert simple string messages', () => {
+ const messages: Anthropic.Messages.MessageParam[] = [
+ { role: 'user', content: 'Hello' },
+ { role: 'assistant', content: 'Hi there' }
+ ];
+
+ const result = convertToVsCodeLmMessages(messages);
+
+ expect(result).toHaveLength(2);
+ expect(result[0].role).toBe('user');
+ expect((result[0].content[0] as MockLanguageModelTextPart).value).toBe('Hello');
+ expect(result[1].role).toBe('assistant');
+ expect((result[1].content[0] as MockLanguageModelTextPart).value).toBe('Hi there');
+ });
+
+ it('should handle complex user messages with tool results', () => {
+ const messages: Anthropic.Messages.MessageParam[] = [{
+ role: 'user',
+ content: [
+ { type: 'text', text: 'Here is the result:' },
+ {
+ type: 'tool_result',
+ tool_use_id: 'tool-1',
+ content: 'Tool output'
+ }
+ ]
+ }];
+
+ const result = convertToVsCodeLmMessages(messages);
+
+ expect(result).toHaveLength(1);
+ expect(result[0].role).toBe('user');
+ expect(result[0].content).toHaveLength(2);
+ const [toolResult, textContent] = result[0].content as [MockLanguageModelToolResultPart, MockLanguageModelTextPart];
+ expect(toolResult.type).toBe('tool_result');
+ expect(textContent.type).toBe('text');
+ });
+
+ it('should handle complex assistant messages with tool calls', () => {
+ const messages: Anthropic.Messages.MessageParam[] = [{
+ role: 'assistant',
+ content: [
+ { type: 'text', text: 'Let me help you with that.' },
+ {
+ type: 'tool_use',
+ id: 'tool-1',
+ name: 'calculator',
+ input: { operation: 'add', numbers: [2, 2] }
+ }
+ ]
+ }];
+
+ const result = convertToVsCodeLmMessages(messages);
+
+ expect(result).toHaveLength(1);
+ expect(result[0].role).toBe('assistant');
+ expect(result[0].content).toHaveLength(2);
+ const [toolCall, textContent] = result[0].content as [MockLanguageModelToolCallPart, MockLanguageModelTextPart];
+ expect(toolCall.type).toBe('tool_call');
+ expect(textContent.type).toBe('text');
+ });
+
+ it('should handle image blocks with appropriate placeholders', () => {
+ const messages: Anthropic.Messages.MessageParam[] = [{
+ role: 'user',
+ content: [
+ { type: 'text', text: 'Look at this:' },
+ {
+ type: 'image',
+ source: {
+ type: 'base64',
+ media_type: 'image/png',
+ data: 'base64data'
+ }
+ }
+ ]
+ }];
+
+ const result = convertToVsCodeLmMessages(messages);
+
+ expect(result).toHaveLength(1);
+ const imagePlaceholder = result[0].content[1] as MockLanguageModelTextPart;
+ expect(imagePlaceholder.value).toContain('[Image (base64): image/png not supported by VSCode LM API]');
+ });
+ });
+
+ describe('convertToAnthropicRole', () => {
+ it('should convert assistant role correctly', () => {
+ const result = convertToAnthropicRole('assistant' as any);
+ expect(result).toBe('assistant');
+ });
+
+ it('should convert user role correctly', () => {
+ const result = convertToAnthropicRole('user' as any);
+ expect(result).toBe('user');
+ });
+
+ it('should return null for unknown roles', () => {
+ const result = convertToAnthropicRole('unknown' as any);
+ expect(result).toBeNull();
+ });
+ });
+
+ describe('convertToAnthropicMessage', () => {
+ it('should convert assistant message with text content', async () => {
+ const vsCodeMessage = {
+ role: 'assistant',
+ name: 'assistant',
+ content: [new vscode.LanguageModelTextPart('Hello')]
+ };
+
+ const result = await convertToAnthropicMessage(vsCodeMessage as any);
+
+ expect(result.role).toBe('assistant');
+ expect(result.content).toHaveLength(1);
+ expect(result.content[0]).toEqual({
+ type: 'text',
+ text: 'Hello'
+ });
+ expect(result.id).toBe('test-uuid');
+ });
+
+ it('should convert assistant message with tool calls', async () => {
+ const vsCodeMessage = {
+ role: 'assistant',
+ name: 'assistant',
+ content: [new vscode.LanguageModelToolCallPart(
+ 'call-1',
+ 'calculator',
+ { operation: 'add', numbers: [2, 2] }
+ )]
+ };
+
+ const result = await convertToAnthropicMessage(vsCodeMessage as any);
+
+ expect(result.content).toHaveLength(1);
+ expect(result.content[0]).toEqual({
+ type: 'tool_use',
+ id: 'call-1',
+ name: 'calculator',
+ input: { operation: 'add', numbers: [2, 2] }
+ });
+ expect(result.id).toBe('test-uuid');
+ });
+
+ it('should throw error for non-assistant messages', async () => {
+ const vsCodeMessage = {
+ role: 'user',
+ name: 'user',
+ content: [new vscode.LanguageModelTextPart('Hello')]
+ };
+
+ await expect(convertToAnthropicMessage(vsCodeMessage as any))
+ .rejects
+ .toThrow('Cline : Only assistant messages are supported.');
+ });
+ });
+});
\ No newline at end of file
diff --git a/src/api/transform/vscode-lm-format.ts b/src/api/transform/vscode-lm-format.ts
new file mode 100644
index 0000000..5ccc6e6
--- /dev/null
+++ b/src/api/transform/vscode-lm-format.ts
@@ -0,0 +1,209 @@
+import { Anthropic } from "@anthropic-ai/sdk";
+import * as vscode from 'vscode';
+
+/**
+ * Safely converts a value into a plain object.
+ */
+function asObjectSafe(value: any): object {
+ // Handle null/undefined
+ if (!value) {
+ return {};
+ }
+
+ try {
+ // Handle strings that might be JSON
+ if (typeof value === 'string') {
+ return JSON.parse(value);
+ }
+
+ // Handle pre-existing objects
+ if (typeof value === 'object') {
+ return Object.assign({}, value);
+ }
+
+ return {};
+ }
+ catch (error) {
+ console.warn('Cline : Failed to parse object:', error);
+ return {};
+ }
+}
+
+export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
+ const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];
+
+ for (const anthropicMessage of anthropicMessages) {
+ // Handle simple string messages
+ if (typeof anthropicMessage.content === "string") {
+ vsCodeLmMessages.push(
+ anthropicMessage.role === "assistant"
+ ? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
+ : vscode.LanguageModelChatMessage.User(anthropicMessage.content)
+ );
+ continue;
+ }
+
+ // Handle complex message structures
+ switch (anthropicMessage.role) {
+ case "user": {
+ const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
+ nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
+ toolMessages: Anthropic.ToolResultBlockParam[];
+ }>(
+ (acc, part) => {
+ if (part.type === "tool_result") {
+ acc.toolMessages.push(part);
+ }
+ else if (part.type === "text" || part.type === "image") {
+ acc.nonToolMessages.push(part);
+ }
+ return acc;
+ },
+ { nonToolMessages: [], toolMessages: [] },
+ );
+
+ // Process tool messages first then non-tool messages
+ const contentParts = [
+ // Convert tool messages to ToolResultParts
+ ...toolMessages.map((toolMessage) => {
+ // Process tool result content into TextParts
+ const toolContentParts: vscode.LanguageModelTextPart[] = (
+ typeof toolMessage.content === "string"
+ ? [new vscode.LanguageModelTextPart(toolMessage.content)]
+ : (
+ toolMessage.content?.map((part) => {
+ if (part.type === "image") {
+ return new vscode.LanguageModelTextPart(
+ `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
+ );
+ }
+ return new vscode.LanguageModelTextPart(part.text);
+ })
+ ?? [new vscode.LanguageModelTextPart("")]
+ )
+ );
+
+ return new vscode.LanguageModelToolResultPart(
+ toolMessage.tool_use_id,
+ toolContentParts
+ );
+ }),
+
+ // Convert non-tool messages to TextParts after tool messages
+ ...nonToolMessages.map((part) => {
+ if (part.type === "image") {
+ return new vscode.LanguageModelTextPart(
+ `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
+ );
+ }
+ return new vscode.LanguageModelTextPart(part.text);
+ })
+ ];
+
+ // Add single user message with all content parts
+ vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
+ break;
+ }
+
+ case "assistant": {
+ const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
+ nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
+ toolMessages: Anthropic.ToolUseBlockParam[];
+ }>(
+ (acc, part) => {
+ if (part.type === "tool_use") {
+ acc.toolMessages.push(part);
+ }
+ else if (part.type === "text" || part.type === "image") {
+ acc.nonToolMessages.push(part);
+ }
+ return acc;
+ },
+ { nonToolMessages: [], toolMessages: [] },
+ );
+
+ // Process tool messages first then non-tool messages
+ const contentParts = [
+ // Convert tool messages to ToolCallParts first
+ ...toolMessages.map((toolMessage) =>
+ new vscode.LanguageModelToolCallPart(
+ toolMessage.id,
+ toolMessage.name,
+ asObjectSafe(toolMessage.input)
+ )
+ ),
+
+ // Convert non-tool messages to TextParts after tool messages
+ ...nonToolMessages.map((part) => {
+ if (part.type === "image") {
+ return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
+ }
+ return new vscode.LanguageModelTextPart(part.text);
+ })
+ ];
+
+ // Add the assistant message to the list of messages
+ vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
+ break;
+ }
+ }
+ }
+
+ return vsCodeLmMessages;
+}
+
+export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
+ switch (vsCodeLmMessageRole) {
+ case vscode.LanguageModelChatMessageRole.Assistant:
+ return "assistant";
+ case vscode.LanguageModelChatMessageRole.User:
+ return "user";
+ default:
+ return null;
+ }
+}
+
+export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
+ const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
+ if (anthropicRole !== "assistant") {
+ throw new Error("Cline : Only assistant messages are supported.");
+ }
+
+ return {
+ id: crypto.randomUUID(),
+ type: "message",
+ model: "vscode-lm",
+ role: anthropicRole,
+ content: (
+ vsCodeLmMessage.content
+ .map((part): Anthropic.ContentBlock | null => {
+ if (part instanceof vscode.LanguageModelTextPart) {
+ return {
+ type: "text",
+ text: part.value
+ };
+ }
+
+ if (part instanceof vscode.LanguageModelToolCallPart) {
+ return {
+ type: "tool_use",
+ id: part.callId || crypto.randomUUID(),
+ name: part.name,
+ input: asObjectSafe(part.input)
+ };
+ }
+
+ return null;
+ })
+ .filter(
+ (part): part is Anthropic.ContentBlock => part !== null
+ )
+ ),
+ stop_reason: null,
+ stop_sequence: null,
+ usage: {
+ input_tokens: 0,
+ output_tokens: 0,
+ }
+ };
+}
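Concretely, the conversion maps plain text one-to-one and rewrites tool blocks into the corresponding VS Code part classes; the unit tests above exercise the same paths. A short sketch:

```ts
import { convertToVsCodeLmMessages } from "./vscode-lm-format"

// Plain text maps one-to-one; tool_use / tool_result blocks become
// LanguageModelToolCallPart / LanguageModelToolResultPart as implemented above.
const lmMessages = convertToVsCodeLmMessages([
	{ role: "user", content: "Hello" },
	{ role: "assistant", content: "Hi there" },
])
// lmMessages[0] is a User message containing a single LanguageModelTextPart("Hello")
```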
diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts
index 3c8ce13..a648f8e 100644
--- a/src/core/webview/ClineProvider.ts
+++ b/src/core/webview/ClineProvider.ts
@@ -93,6 +93,7 @@ type GlobalStateKey =
| "requestDelaySeconds"
| "currentApiConfigName"
| "listApiConfigMeta"
+ | "vsCodeLmModelSelector"
| "mode"
| "modeApiConfigs"
| "customPrompts"
@@ -571,8 +572,12 @@ export class ClineProvider implements vscode.WebviewViewProvider {
const lmStudioModels = await this.getLmStudioModels(message.text)
this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
break
+ case "requestVsCodeLmModels":
+ const vsCodeLmModels = await this.getVsCodeLmModels()
+ this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
+ break
case "refreshGlamaModels":
- await this.refreshGlamaModels()
+ await this.refreshGlamaModels()
break
case "refreshOpenRouterModels":
await this.refreshOpenRouterModels()
@@ -1109,6 +1114,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
+ vsCodeLmModelSelector,
} = apiConfiguration
await this.updateGlobalState("apiProvider", apiProvider)
await this.updateGlobalState("apiModelId", apiModelId)
@@ -1140,6 +1146,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("openRouterModelId", openRouterModelId)
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
+ await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
if (this.cline) {
this.cline.api = buildApiHandler(apiConfiguration)
}
@@ -1210,6 +1217,17 @@ export class ClineProvider implements vscode.WebviewViewProvider {
}
}
+ // VSCode LM API
+ private async getVsCodeLmModels() {
+ try {
+ const models = await vscode.lm.selectChatModels({});
+ return models || [];
+ } catch (error) {
+ console.error('Error fetching VS Code LM models:', error);
+ return [];
+ }
+ }
+
// OpenAi
async getOpenAiModels(baseUrl?: string, apiKey?: string) {
@@ -1268,6 +1286,33 @@ export class ClineProvider implements vscode.WebviewViewProvider {
return cacheDir
}
+ async handleGlamaCallback(code: string) {
+ let apiKey: string
+ try {
+ const response = await axios.post("https://glama.ai/api/gateway/v1/auth/exchange-code", { code })
+ if (response.data && response.data.apiKey) {
+ apiKey = response.data.apiKey
+ } else {
+ throw new Error("Invalid response from Glama API")
+ }
+ } catch (error) {
+ console.error("Error exchanging code for API key:", error)
+ throw error
+ }
+
+ const glama: ApiProvider = "glama"
+ await this.updateGlobalState("apiProvider", glama)
+ await this.storeSecret("glamaApiKey", apiKey)
+ await this.postStateToWebview()
+ if (this.cline) {
+ this.cline.api = buildApiHandler({
+ apiProvider: glama,
+ glamaApiKey: apiKey,
+ })
+ }
+ // await this.postMessageToWebview({ type: "action", action: "settingsButtonClicked" }) // bad ux if user is on welcome
+ }
+
async readGlamaModels(): Promise<Record<string, ModelInfo> | undefined> {
const glamaModelsFilePath = path.join(
await this.ensureCacheDirectoryExists(),
@@ -1742,6 +1787,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
requestDelaySeconds,
currentApiConfigName,
listApiConfigMeta,
+ vsCodeLmModelSelector,
mode,
modeApiConfigs,
customPrompts,
@@ -1800,6 +1846,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
this.getGlobalState("requestDelaySeconds") as Promise,
this.getGlobalState("currentApiConfigName") as Promise,
this.getGlobalState("listApiConfigMeta") as Promise,
+ this.getGlobalState("vsCodeLmModelSelector") as Promise,
this.getGlobalState("mode") as Promise,
this.getGlobalState("modeApiConfigs") as Promise | undefined>,
this.getGlobalState("customPrompts") as Promise,
@@ -1852,6 +1899,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
+ vsCodeLmModelSelector,
},
lastShownAnnouncementId,
customInstructions,
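The new `requestVsCodeLmModels` / `vsCodeLmModels` pair gives the webview a round-trip for model discovery. A sketch of the webview side, using the same imports the settings UI already uses elsewhere in this diff:

```ts
import { vscode } from "../../utils/vscode"
import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"

// Ask the extension host for the available VS Code LM models...
vscode.postMessage({ type: "requestVsCodeLmModels" })

// ...and pick up the reply that getVsCodeLmModels() posts back.
window.addEventListener("message", (event: MessageEvent) => {
	const message: ExtensionMessage = event.data
	if (message.type === "vsCodeLmModels") {
		console.log("VS Code LM models:", message.vsCodeLmModels)
	}
})
```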
diff --git a/src/exports/README.md b/src/exports/README.md
index 40f909a..03b8983 100644
--- a/src/exports/README.md
+++ b/src/exports/README.md
@@ -7,7 +7,7 @@ The Cline extension exposes an API that can be used by other extensions. To use
3. Get access to the API with the following code:
```ts
- const clineExtension = vscode.extensions.getExtension("saoudrizwan.claude-dev")
+ const clineExtension = vscode.extensions.getExtension("rooveterinaryinc.roo-cline")
if (!clineExtension?.isActive) {
throw new Error("Cline extension is not activated")
@@ -44,11 +44,11 @@ The Cline extension exposes an API that can be used by other extensions. To use
}
```
- **Note:** To ensure that the `saoudrizwan.claude-dev` extension is activated before your extension, add it to the `extensionDependencies` in your `package.json`:
+ **Note:** To ensure that the `rooveterinaryinc.roo-cline` extension is activated before your extension, add it to the `extensionDependencies` in your `package.json`:
```json
"extensionDependencies": [
- "saoudrizwan.claude-dev"
+ "rooveterinaryinc.roo-cline"
]
```
diff --git a/src/extension.ts b/src/extension.ts
index c6dd9c2..31ba8a7 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -139,6 +139,14 @@ export function activate(context: vscode.ExtensionContext) {
return
}
switch (path) {
+ case "/glama": {
+ const code = query.get("code")
+ if (code) {
+ await visibleProvider.handleGlamaCallback(code)
+ }
+ break
+ }
+
case "/openrouter": {
const code = query.get("code")
if (code) {
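For reference, the `/glama` branch is reached through the extension's URI handler; assuming the standard `vscode://<publisher>.<name>/<path>` callback scheme with the extension id seen in the exports README diff above, the redirect would look like this (the code value is a placeholder):

```ts
import * as vscode from "vscode"

// Sketch of what the dispatch above receives for the Glama branch, assuming a
// callback URI of the form vscode://rooveterinaryinc.roo-cline/glama?code=...
const uri = vscode.Uri.parse("vscode://rooveterinaryinc.roo-cline/glama?code=abc123")
const query = new URLSearchParams(uri.query)
query.get("code") // "abc123", which is then passed to handleGlamaCallback()
```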
diff --git a/src/integrations/theme/getTheme.ts b/src/integrations/theme/getTheme.ts
index ffed26e..dbc7a0f 100644
--- a/src/integrations/theme/getTheme.ts
+++ b/src/integrations/theme/getTheme.ts
@@ -141,5 +141,5 @@ export function mergeJson(
}
function getExtensionUri(): vscode.Uri {
- return vscode.extensions.getExtension("saoudrizwan.claude-dev")!.extensionUri
+ return vscode.extensions.getExtension("rooveterinaryinc.roo-cline")!.extensionUri
}
diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts
index e09a1cc..190321b 100644
--- a/src/shared/ExtensionMessage.ts
+++ b/src/shared/ExtensionMessage.ts
@@ -25,6 +25,9 @@ export interface ExtensionMessage {
| "enhancedPrompt"
| "commitSearchResults"
| "listApiConfig"
+ | "vsCodeLmModels"
+ | "vsCodeLmApiAvailable"
+ | "requestVsCodeLmModels"
| "updatePrompt"
| "systemPrompt"
text?: string
@@ -40,6 +43,7 @@ export interface ExtensionMessage {
images?: string[]
ollamaModels?: string[]
lmStudioModels?: string[]
+ vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
filePaths?: string[]
partialMessage?: ClineMessage
glamaModels?: Record<string, ModelInfo>
diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts
index 5960512..9d6905f 100644
--- a/src/shared/WebviewMessage.ts
+++ b/src/shared/WebviewMessage.ts
@@ -61,9 +61,11 @@ export interface WebviewMessage {
| "terminalOutputLineLimit"
| "mcpEnabled"
| "searchCommits"
+ | "refreshGlamaModels"
| "alwaysApproveResubmit"
| "requestDelaySeconds"
| "setApiConfigPassword"
+ | "requestVsCodeLmModels"
| "mode"
| "updatePrompt"
| "updateEnhancedPrompt"
diff --git a/src/shared/__tests__/checkExistApiConfig.test.ts b/src/shared/__tests__/checkExistApiConfig.test.ts
new file mode 100644
index 0000000..13b64f5
--- /dev/null
+++ b/src/shared/__tests__/checkExistApiConfig.test.ts
@@ -0,0 +1,56 @@
+import { checkExistKey } from '../checkExistApiConfig';
+import { ApiConfiguration } from '../api';
+
+describe('checkExistKey', () => {
+ it('should return false for undefined config', () => {
+ expect(checkExistKey(undefined)).toBe(false);
+ });
+
+ it('should return false for empty config', () => {
+ const config: ApiConfiguration = {};
+ expect(checkExistKey(config)).toBe(false);
+ });
+
+ it('should return true when one key is defined', () => {
+ const config: ApiConfiguration = {
+ apiKey: 'test-key'
+ };
+ expect(checkExistKey(config)).toBe(true);
+ });
+
+ it('should return true when multiple keys are defined', () => {
+ const config: ApiConfiguration = {
+ apiKey: 'test-key',
+ glamaApiKey: 'glama-key',
+ openRouterApiKey: 'openrouter-key'
+ };
+ expect(checkExistKey(config)).toBe(true);
+ });
+
+ it('should return true when only non-key fields are undefined', () => {
+ const config: ApiConfiguration = {
+ apiKey: 'test-key',
+ apiProvider: undefined,
+ anthropicBaseUrl: undefined
+ };
+ expect(checkExistKey(config)).toBe(true);
+ });
+
+ it('should return false when all key fields are undefined', () => {
+ const config: ApiConfiguration = {
+ apiKey: undefined,
+ glamaApiKey: undefined,
+ openRouterApiKey: undefined,
+ awsRegion: undefined,
+ vertexProjectId: undefined,
+ openAiApiKey: undefined,
+ ollamaModelId: undefined,
+ lmStudioModelId: undefined,
+ geminiApiKey: undefined,
+ openAiNativeApiKey: undefined,
+ deepSeekApiKey: undefined,
+ vsCodeLmModelSelector: undefined
+ };
+ expect(checkExistKey(config)).toBe(false);
+ });
+});
\ No newline at end of file
diff --git a/src/shared/__tests__/vsCodeSelectorUtils.test.ts b/src/shared/__tests__/vsCodeSelectorUtils.test.ts
new file mode 100644
index 0000000..dd4ed38
--- /dev/null
+++ b/src/shared/__tests__/vsCodeSelectorUtils.test.ts
@@ -0,0 +1,44 @@
+import { stringifyVsCodeLmModelSelector, SELECTOR_SEPARATOR } from '../vsCodeSelectorUtils';
+import { LanguageModelChatSelector } from 'vscode';
+
+describe('vsCodeSelectorUtils', () => {
+ describe('stringifyVsCodeLmModelSelector', () => {
+ it('should join all defined selector properties with separator', () => {
+ const selector: LanguageModelChatSelector = {
+ vendor: 'test-vendor',
+ family: 'test-family',
+ version: 'v1',
+ id: 'test-id'
+ };
+
+ const result = stringifyVsCodeLmModelSelector(selector);
+ expect(result).toBe('test-vendor/test-family/v1/test-id');
+ });
+
+ it('should skip undefined properties', () => {
+ const selector: LanguageModelChatSelector = {
+ vendor: 'test-vendor',
+ family: 'test-family'
+ };
+
+ const result = stringifyVsCodeLmModelSelector(selector);
+ expect(result).toBe('test-vendor/test-family');
+ });
+
+ it('should handle empty selector', () => {
+ const selector: LanguageModelChatSelector = {};
+
+ const result = stringifyVsCodeLmModelSelector(selector);
+ expect(result).toBe('');
+ });
+
+ it('should handle selector with only one property', () => {
+ const selector: LanguageModelChatSelector = {
+ vendor: 'test-vendor'
+ };
+
+ const result = stringifyVsCodeLmModelSelector(selector);
+ expect(result).toBe('test-vendor');
+ });
+ });
+});
\ No newline at end of file
diff --git a/src/shared/api.ts b/src/shared/api.ts
index 857dd92..908ffec 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -1,3 +1,5 @@
+import * as vscode from 'vscode';
+
export type ApiProvider =
| "anthropic"
| "glama"
@@ -10,11 +12,13 @@ export type ApiProvider =
| "gemini"
| "openai-native"
| "deepseek"
+ | "vscode-lm"
export interface ApiHandlerOptions {
apiModelId?: string
apiKey?: string // anthropic
anthropicBaseUrl?: string
+ vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
glamaModelId?: string
glamaModelInfo?: ModelInfo
glamaApiKey?: string
@@ -58,7 +62,7 @@ export type ApiConfiguration = ApiHandlerOptions & {
export interface ModelInfo {
maxTokens?: number
- contextWindow?: number
+ contextWindow: number
supportsImages?: boolean
supportsComputerUse?: boolean
supportsPromptCache: boolean // this value is hardcoded for now
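
With the `vscode-lm` provider and the `vsCodeLmModelSelector` field in place, a configuration for this provider reduces to a vendor/family pair. For example (values are illustrative):

```typescript
import { ApiConfiguration } from "./api"

// Illustrative: select Copilot's gpt-4 family through the VS Code LM API.
const config: ApiConfiguration = {
	apiProvider: "vscode-lm",
	vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" },
}
```
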
diff --git a/src/shared/checkExistApiConfig.ts b/src/shared/checkExistApiConfig.ts
index b347ccf..c876e81 100644
--- a/src/shared/checkExistApiConfig.ts
+++ b/src/shared/checkExistApiConfig.ts
@@ -13,7 +13,8 @@ export function checkExistKey(config: ApiConfiguration | undefined) {
config.lmStudioModelId,
config.geminiApiKey,
config.openAiNativeApiKey,
- config.deepSeekApiKey
+ config.deepSeekApiKey,
+ config.vsCodeLmModelSelector,
].some((key) => key !== undefined)
: false;
}
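
For readers following along, the full array in `checkExistKey` after this patch can be reconstructed from the hunk above together with the field list exercised by the new test; treat the lines outside the hunk as a sketch:

```typescript
import { ApiConfiguration } from "./api"

// checkExistKey as it should read after the patch. Fields above the hunk's
// context are inferred from the test's "all key fields undefined" case.
export function checkExistKey(config: ApiConfiguration | undefined) {
	return config
		? [
				config.apiKey,
				config.glamaApiKey,
				config.openRouterApiKey,
				config.awsRegion,
				config.vertexProjectId,
				config.openAiApiKey,
				config.ollamaModelId,
				config.lmStudioModelId,
				config.geminiApiKey,
				config.openAiNativeApiKey,
				config.deepSeekApiKey,
				config.vsCodeLmModelSelector,
			].some((key) => key !== undefined)
		: false
}
```
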
diff --git a/src/shared/vsCodeSelectorUtils.ts b/src/shared/vsCodeSelectorUtils.ts
new file mode 100644
index 0000000..a54d63f
--- /dev/null
+++ b/src/shared/vsCodeSelectorUtils.ts
@@ -0,0 +1,14 @@
+import { LanguageModelChatSelector } from 'vscode';
+
+export const SELECTOR_SEPARATOR = '/';
+
+export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
+ return [
+ selector.vendor,
+ selector.family,
+ selector.version,
+ selector.id
+ ]
+ .filter(Boolean)
+ .join(SELECTOR_SEPARATOR);
+}
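
The stringified form is what the settings UI stores and later splits apart again, so the helper and the UI must agree on `SELECTOR_SEPARATOR`. A quick usage sketch:

```typescript
import { stringifyVsCodeLmModelSelector, SELECTOR_SEPARATOR } from "./vsCodeSelectorUtils"

const id = stringifyVsCodeLmModelSelector({ vendor: "copilot", family: "gpt-4" })
// => "copilot/gpt-4"
const [vendor, family] = id.split(SELECTOR_SEPARATOR)
// => ["copilot", "gpt-4"]
```

Note the round trip is only safe while vendor and family themselves contain no `/`; the tests above pin down the skip-undefined and empty-selector behavior, but not that edge case.
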
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index 51ba9bf..dfee51a 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -4,7 +4,7 @@ import {
VSCodeLink,
VSCodeRadio,
VSCodeRadioGroup,
- VSCodeTextField,
+ VSCodeTextField
} from "@vscode/webview-ui-toolkit/react"
import { Fragment, memo, useCallback, useEffect, useMemo, useState } from "react"
import { useEvent, useInterval } from "react-use"
@@ -33,6 +33,7 @@ import {
import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
import { useExtensionState } from "../../context/ExtensionStateContext"
import { vscode } from "../../utils/vscode"
+import * as vscodemodels from "vscode"
import VSCodeButtonLink from "../common/VSCodeButtonLink"
import OpenRouterModelPicker, {
ModelDescriptionMarkdown,
@@ -50,6 +51,7 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
const { apiConfiguration, setApiConfiguration, uriScheme, onUpdateApiConfig } = useExtensionState()
const [ollamaModels, setOllamaModels] = useState<string[]>([])
const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
+ const [vsCodeLmModels, setVsCodeLmModels] = useState<vscodemodels.LanguageModelChatSelector[]>([])
const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
@@ -70,21 +72,24 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
} else if (selectedProvider === "lmstudio") {
vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
+ } else if (selectedProvider === "vscode-lm") {
+ vscode.postMessage({ type: "requestVsCodeLmModels" })
}
}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
useEffect(() => {
- if (selectedProvider === "ollama" || selectedProvider === "lmstudio") {
+ if (selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm") {
requestLocalModels()
}
}, [selectedProvider, requestLocalModels])
- useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null)
-
+ useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm" ? 2000 : null)
const handleMessage = useCallback((event: MessageEvent) => {
const message: ExtensionMessage = event.data
if (message.type === "ollamaModels" && message.ollamaModels) {
setOllamaModels(message.ollamaModels)
} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
setLmStudioModels(message.lmStudioModels)
+ } else if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
+ setVsCodeLmModels(message.vsCodeLmModels)
}
}, [])
useEvent("message", handleMessage)
@@ -139,6 +144,7 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
{ value: "vertex", label: "GCP Vertex AI" },
{ value: "bedrock", label: "AWS Bedrock" },
{ value: "glama", label: "Glama" },
+ { value: "vscode-lm", label: "VS Code LM API" },
{ value: "lmstudio", label: "LM Studio" },
{ value: "ollama", label: "Ollama" }
]}
@@ -206,11 +212,12 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
Glama API Key
{!apiConfiguration?.glamaApiKey && (
- <p>
- You can get an Glama API key by signing up here.
- </p>
+ <VSCodeButtonLink href={getGlamaAuthUrl(uriScheme)} appearance="secondary">
+ Get Glama API Key
+ </VSCodeButtonLink>
)}
)}
+ {selectedProvider === "vscode-lm" && (
+ 	<div>
+ 		<div className="dropdown-container">
+ 			<label htmlFor="vscode-lm-model">
+ 				<span style={{ fontWeight: 500 }}>Language Model</span>
+ 			</label>
+ 			{vsCodeLmModels.length > 0 ? (
+ 				<Dropdown
+ 					id="vscode-lm-model"
+ 					value={
+ 						apiConfiguration?.vsCodeLmModelSelector
+ 							? `${apiConfiguration.vsCodeLmModelSelector.vendor ?? ""}/${apiConfiguration.vsCodeLmModelSelector.family ?? ""}`
+ 							: ""
+ 					}
+ 					onChange={(value: unknown) => {
+ 						const valueStr = (value as DropdownOption).value;
+ 						const [vendor, family] = valueStr.split('/');
+ 						setApiConfiguration({
+ 							...apiConfiguration,
+ 							vsCodeLmModelSelector: valueStr ? { vendor, family } : undefined
+ 						});
+ 					}}
+ 					style={{ width: "100%" }}
+ 					options={[
+ 						{ value: "", label: "Select a model..." },
+ 						...vsCodeLmModels.map((model) => ({
+ 							value: `${model.vendor}/${model.family}`,
+ 							label: `${model.vendor} - ${model.family}`
+ 						}))
+ 					]}
+ 				/>
+ 			) : (
+ 				<p style={{ fontSize: "12px", marginTop: "5px" }}>
+ 					The VS Code Language Model API allows you to run models provided by other VS Code extensions
+ 					(including but not limited to GitHub Copilot). The easiest way to get started is to install the
+ 					Copilot and Copilot Chat extensions from the VS Code Marketplace.
+ 				</p>
+ 			)}
+ 		</div>
+ 		<p style={{ fontSize: "12px", marginTop: "5px", color: "var(--vscode-errorForeground)" }}>
+ 			Note: This is a very experimental integration and may not work as expected. Please report any issues to
+ 			the Roo-Cline GitHub repository.
+ 		</p>
+ 	</div>
+ )}
+
{selectedProvider === "ollama" && (
{
@@ -932,6 +998,17 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
selectedModelId: apiConfiguration?.lmStudioModelId || "",
selectedModelInfo: openAiModelInfoSaneDefaults,
}
+ case "vscode-lm":
+ return {
+ selectedProvider: provider,
+ selectedModelId: apiConfiguration?.vsCodeLmModelSelector ?
+ `${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}` :
+ "",
+ selectedModelInfo: {
+ ...openAiModelInfoSaneDefaults,
+ supportsImages: false, // VSCode LM API currently doesn't support images
+ },
+ }
default:
return getProviderData(anthropicModels, anthropicDefaultModelId)
}
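
`VsCodeLmHandler` itself (imported in `src/api/index.ts` earlier in this diff) is not shown in this excerpt. The essential flow it must implement, sketched against the documented `vscode.lm` surface; the function name and error text are illustrative:

```typescript
import * as vscode from "vscode"

// Sketch: resolve the configured selector to a concrete model and stream a reply.
async function completeWithVsCodeLm(
	selector: vscode.LanguageModelChatSelector,
	prompt: string,
): Promise<string> {
	const [model] = await vscode.lm.selectChatModels(selector)
	if (!model) {
		throw new Error("No VS Code LM model matched the configured selector")
	}
	const response = await model.sendRequest(
		[vscode.LanguageModelChatMessage.User(prompt)],
		{},
		new vscode.CancellationTokenSource().token,
	)
	let text = ""
	for await (const chunk of response.text) {
		text += chunk
	}
	return text
}
```
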
diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts
index 2ddc46d..ccb1551 100644
--- a/webview-ui/src/utils/validate.ts
+++ b/webview-ui/src/utils/validate.ts
@@ -57,6 +57,11 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s
return "You must provide a valid model ID."
}
break
+ case "vscode-lm":
+ if (!apiConfiguration.vsCodeLmModelSelector) {
+ return "You must provide a valid model selector."
+ }
+ break
}
}
return undefined
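
So after this change, a `vscode-lm` configuration without a selector fails validation the same way other providers fail without their keys; for instance:

```typescript
import { validateApiConfiguration } from "./validate"

// Illustrative check: provider chosen, but no model selector set yet.
const error = validateApiConfiguration({ apiProvider: "vscode-lm" })
// => "You must provide a valid model selector."
```
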