Merge remote-tracking branch 'origin/main' into fix/roo-cline-select-api-config

Matt Rubens committed 2025-01-15 20:11:17 -05:00
25 changed files with 1753 additions and 43 deletions

View File

@@ -11,6 +11,7 @@ import { LmStudioHandler } from "./providers/lmstudio"
import { GeminiHandler } from "./providers/gemini"
import { OpenAiNativeHandler } from "./providers/openai-native"
import { DeepSeekHandler } from "./providers/deepseek"
import { VsCodeLmHandler } from "./providers/vscode-lm"
import { ApiStream } from "./transform/stream"
export interface SingleCompletionHandler {

View File

@@ -60,6 +60,13 @@ jest.mock('openai', () => {
describe('OpenAiNativeHandler', () => {
let handler: OpenAiNativeHandler;
let mockOptions: ApiHandlerOptions;
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello!'
}
];
beforeEach(() => {
mockOptions = {
@@ -86,14 +93,6 @@ describe('OpenAiNativeHandler', () => {
});
describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello!'
}
];
it('should handle streaming responses', async () => {
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
@@ -109,15 +108,126 @@ describe('OpenAiNativeHandler', () => {
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
const stream = handler.createMessage(systemPrompt, messages);
await expect(async () => {
for await (const chunk of stream) {
// Should not reach here
}
}).rejects.toThrow('API Error');
});
it('should handle missing content in response for o1 model', async () => {
// Use o1 model which supports developer role
handler = new OpenAiNativeHandler({
...mockOptions,
apiModelId: 'o1'
});
mockCreate.mockResolvedValueOnce({
choices: [{ message: { content: null } }],
usage: {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0
}
});
const generator = handler.createMessage(systemPrompt, messages);
const results = [];
for await (const result of generator) {
results.push(result);
}
expect(results).toEqual([
{ type: 'text', text: '' },
{ type: 'usage', inputTokens: 0, outputTokens: 0 }
]);
// Verify developer role is used for system prompt with o1 model
expect(mockCreate).toHaveBeenCalledWith({
model: 'o1',
messages: [
{ role: 'developer', content: systemPrompt },
{ role: 'user', content: 'Hello!' }
]
});
});
});
describe('streaming models', () => {
beforeEach(() => {
handler = new OpenAiNativeHandler({
...mockOptions,
apiModelId: 'gpt-4o',
});
});
it('should handle streaming response', async () => {
const mockStream = [
{ choices: [{ delta: { content: 'Hello' } }], usage: null },
{ choices: [{ delta: { content: ' there' } }], usage: null },
{ choices: [{ delta: { content: '!' } }], usage: { prompt_tokens: 10, completion_tokens: 5 } },
];
mockCreate.mockResolvedValueOnce(
(async function* () {
for (const chunk of mockStream) {
yield chunk;
}
})()
);
const generator = handler.createMessage(systemPrompt, messages);
const results = [];
for await (const result of generator) {
results.push(result);
}
expect(results).toEqual([
{ type: 'text', text: 'Hello' },
{ type: 'text', text: ' there' },
{ type: 'text', text: '!' },
{ type: 'usage', inputTokens: 10, outputTokens: 5 },
]);
expect(mockCreate).toHaveBeenCalledWith({
model: 'gpt-4o',
temperature: 0,
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: 'Hello!' },
],
stream: true,
stream_options: { include_usage: true },
});
});
it('should handle empty delta content', async () => {
const mockStream = [
{ choices: [{ delta: {} }], usage: null },
{ choices: [{ delta: { content: null } }], usage: null },
{ choices: [{ delta: { content: 'Hello' } }], usage: { prompt_tokens: 10, completion_tokens: 5 } },
];
mockCreate.mockResolvedValueOnce(
(async function* () {
for (const chunk of mockStream) {
yield chunk;
}
})()
);
const generator = handler.createMessage(systemPrompt, messages);
const results = [];
for await (const result of generator) {
results.push(result);
}
expect(results).toEqual([
{ type: 'text', text: 'Hello' },
{ type: 'usage', inputTokens: 10, outputTokens: 5 },
]);
});
});
describe('completePrompt', () => {
@@ -206,4 +316,4 @@ describe('OpenAiNativeHandler', () => {
expect(modelInfo.info).toBeDefined();
});
});
});
});

View File

@@ -0,0 +1,289 @@
import * as vscode from 'vscode';
import { VsCodeLmHandler } from '../vscode-lm';
import { ApiHandlerOptions } from '../../../shared/api';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock vscode namespace
jest.mock('vscode', () => {
class MockLanguageModelTextPart {
type = 'text';
constructor(public value: string) {}
}
class MockLanguageModelToolCallPart {
type = 'tool_call';
constructor(
public callId: string,
public name: string,
public input: any
) {}
}
return {
workspace: {
onDidChangeConfiguration: jest.fn((callback) => ({
dispose: jest.fn()
}))
},
CancellationTokenSource: jest.fn(() => ({
token: {
isCancellationRequested: false,
onCancellationRequested: jest.fn()
},
cancel: jest.fn(),
dispose: jest.fn()
})),
CancellationError: class CancellationError extends Error {
constructor() {
super('Operation cancelled');
this.name = 'CancellationError';
}
},
LanguageModelChatMessage: {
Assistant: jest.fn((content) => ({
role: 'assistant',
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
})),
User: jest.fn((content) => ({
role: 'user',
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
}))
},
LanguageModelTextPart: MockLanguageModelTextPart,
LanguageModelToolCallPart: MockLanguageModelToolCallPart,
lm: {
selectChatModels: jest.fn()
}
};
});
const mockLanguageModelChat = {
id: 'test-model',
name: 'Test Model',
vendor: 'test-vendor',
family: 'test-family',
version: '1.0',
maxInputTokens: 4096,
sendRequest: jest.fn(),
countTokens: jest.fn()
};
describe('VsCodeLmHandler', () => {
let handler: VsCodeLmHandler;
const defaultOptions: ApiHandlerOptions = {
vsCodeLmModelSelector: {
vendor: 'test-vendor',
family: 'test-family'
}
};
beforeEach(() => {
jest.clearAllMocks();
handler = new VsCodeLmHandler(defaultOptions);
});
afterEach(() => {
handler.dispose();
});
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeDefined();
expect(vscode.workspace.onDidChangeConfiguration).toHaveBeenCalled();
});
it('should handle configuration changes', () => {
const callback = (vscode.workspace.onDidChangeConfiguration as jest.Mock).mock.calls[0][0];
callback({ affectsConfiguration: () => true });
// Should reset client when config changes
expect(handler['client']).toBeNull();
});
});
describe('createClient', () => {
it('should create client with selector', async () => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
const client = await handler['createClient']({
vendor: 'test-vendor',
family: 'test-family'
});
expect(client).toBeDefined();
expect(client.id).toBe('test-model');
expect(vscode.lm.selectChatModels).toHaveBeenCalledWith({
vendor: 'test-vendor',
family: 'test-family'
});
});
it('should return default client when no models available', async () => {
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([]);
const client = await handler['createClient']({});
expect(client).toBeDefined();
expect(client.id).toBe('default-lm');
expect(client.vendor).toBe('vscode');
});
});
describe('createMessage', () => {
beforeEach(() => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
mockLanguageModelChat.countTokens.mockResolvedValue(10);
});
it('should stream text responses', async () => {
const systemPrompt = 'You are a helpful assistant';
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user' as const,
content: 'Hello'
}];
const responseText = 'Hello! How can I help you?';
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
stream: (async function* () {
yield new vscode.LanguageModelTextPart(responseText);
return;
})(),
text: (async function* () {
yield responseText;
return;
})()
});
const stream = handler.createMessage(systemPrompt, messages);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks).toHaveLength(2); // Text chunk + usage chunk
expect(chunks[0]).toEqual({
type: 'text',
text: responseText
});
expect(chunks[1]).toMatchObject({
type: 'usage',
inputTokens: expect.any(Number),
outputTokens: expect.any(Number)
});
});
it('should handle tool calls', async () => {
const systemPrompt = 'You are a helpful assistant';
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user' as const,
content: 'Calculate 2+2'
}];
const toolCallData = {
name: 'calculator',
arguments: { operation: 'add', numbers: [2, 2] },
callId: 'call-1'
};
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
stream: (async function* () {
yield new vscode.LanguageModelToolCallPart(
toolCallData.callId,
toolCallData.name,
toolCallData.arguments
);
return;
})(),
text: (async function* () {
yield JSON.stringify({ type: 'tool_call', ...toolCallData });
return;
})()
});
const stream = handler.createMessage(systemPrompt, messages);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks).toHaveLength(2); // Tool call chunk + usage chunk
expect(chunks[0]).toEqual({
type: 'text',
text: JSON.stringify({ type: 'tool_call', ...toolCallData })
});
});
it('should handle errors', async () => {
const systemPrompt = 'You are a helpful assistant';
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user' as const,
content: 'Hello'
}];
mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('API Error'));
await expect(async () => {
const stream = handler.createMessage(systemPrompt, messages);
for await (const _ of stream) {
// consume stream
}
}).rejects.toThrow('API Error');
});
});
describe('getModel', () => {
it('should return model info when client exists', async () => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
// Initialize client
await handler['getClient']();
const model = handler.getModel();
expect(model.id).toBe('test-model');
expect(model.info).toBeDefined();
expect(model.info.contextWindow).toBe(4096);
});
it('should return fallback model info when no client exists', () => {
const model = handler.getModel();
expect(model.id).toBe('test-vendor/test-family');
expect(model.info).toBeDefined();
});
});
describe('completePrompt', () => {
it('should complete single prompt', async () => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
const responseText = 'Completed text';
mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
stream: (async function* () {
yield new vscode.LanguageModelTextPart(responseText);
return;
})(),
text: (async function* () {
yield responseText;
return;
})()
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe(responseText);
expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled();
});
it('should handle errors during completion', async () => {
const mockModel = { ...mockLanguageModelChat };
(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('Completion failed'));
await expect(handler.completePrompt('Test prompt'))
.rejects
.toThrow('VSCode LM completion error: Completion failed');
});
});
});

View File

@@ -23,14 +23,16 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
}
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
switch (this.getModel().id) {
const modelId = this.getModel().id
switch (modelId) {
case "o1":
case "o1-preview":
case "o1-mini": {
// o1 doesn't support streaming, non-1 temp, or system prompt
// o1-preview and o1-mini don't support streaming, non-1 temp, or system prompt
// o1 doesn't support streaming or non-1 temp, but it does support a developer prompt
const response = await this.client.chat.completions.create({
model: this.getModel().id,
messages: [{ role: "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
model: modelId,
messages: [{ role: modelId === "o1" ? "developer" : "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
})
yield {
type: "text",
@@ -93,7 +95,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
case "o1":
case "o1-preview":
case "o1-mini":
// o1 doesn't support non-1 temp or system prompt
// o1 doesn't support non-1 temp
requestOptions = {
model: modelId,
messages: [{ role: "user", content: prompt }]
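Taken on its own, the change in this file reduces to a three-way role mapping for the system prompt. A minimal sketch of that rule, assuming nothing beyond this diff (the helper name `systemRoleFor` is hypothetical):

```ts
type ChatRole = "system" | "developer" | "user"

// Which role carries the system prompt, per the switch above.
function systemRoleFor(modelId: string): ChatRole {
	switch (modelId) {
		case "o1":
			return "developer" // o1 accepts a developer prompt (still no streaming, temp fixed at 1)
		case "o1-preview":
		case "o1-mini":
			return "user" // no system or developer prompt support; fold it into a user message
		default:
			return "system" // streaming models keep a standard system message
	}
}

// systemRoleFor("o1")      -> "developer"
// systemRoleFor("o1-mini") -> "user"
// systemRoleFor("gpt-4o")  -> "system"
```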

View File

@@ -0,0 +1,564 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { ApiHandler, SingleCompletionHandler } from "../";
import { calculateApiCost } from "../../utils/cost";
import { ApiStream } from "../transform/stream";
import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format";
import { SELECTOR_SEPARATOR, stringifyVsCodeLmModelSelector } from "../../shared/vsCodeSelectorUtils";
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api";
/**
* Handles interaction with VS Code's Language Model API for chat-based operations.
* This handler implements the ApiHandler interface to provide VS Code LM specific functionality.
*
* @implements {ApiHandler}
*
* @remarks
* The handler manages a VS Code language model chat client and provides methods to:
* - Create and manage chat client instances
* - Stream messages using VS Code's Language Model API
* - Retrieve model information
*
* @example
* ```typescript
* const options = {
* vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" }
* };
* const handler = new VsCodeLmHandler(options);
*
* // Stream a conversation
* const systemPrompt = "You are a helpful assistant";
* const messages = [{ role: "user", content: "Hello!" }];
* for await (const chunk of handler.createMessage(systemPrompt, messages)) {
* console.log(chunk);
* }
* ```
*/
export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions;
private client: vscode.LanguageModelChat | null;
private disposable: vscode.Disposable | null;
private currentRequestCancellation: vscode.CancellationTokenSource | null;
constructor(options: ApiHandlerOptions) {
this.options = options;
this.client = null;
this.disposable = null;
this.currentRequestCancellation = null;
try {
// Listen for model changes and reset client
this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
if (event.affectsConfiguration('lm')) {
try {
this.client = null;
this.ensureCleanState();
}
catch (error) {
console.error('Error during configuration change cleanup:', error);
}
}
});
}
catch (error) {
// Ensure cleanup if constructor fails
this.dispose();
throw new Error(
`Cline <Language Model API>: Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
);
}
}
/**
* Creates a language model chat client based on the provided selector.
*
* @param selector - Selector criteria to filter language model chat instances
* @returns Promise resolving to the first matching language model chat instance
* @throws Error when no matching models are found with the given selector
*
* @example
* const selector = { vendor: "copilot", family: "gpt-4o" };
* const chatClient = await createClient(selector);
*/
async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
try {
const models = await vscode.lm.selectChatModels(selector);
// Use first available model or create a minimal model object
if (models && Array.isArray(models) && models.length > 0) {
return models[0];
}
// Create a minimal model if no models are available
return {
id: 'default-lm',
name: 'Default Language Model',
vendor: 'vscode',
family: 'lm',
version: '1.0',
maxInputTokens: 8192,
sendRequest: async (messages, options, token) => {
// Provide a minimal implementation
return {
stream: (async function* () {
yield new vscode.LanguageModelTextPart(
"Language model functionality is limited. Please check VS Code configuration."
);
})(),
text: (async function* () {
yield "Language model functionality is limited. Please check VS Code configuration.";
})()
};
},
countTokens: async () => 0
};
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
}
}
dispose(): void {
if (this.disposable) {
this.disposable.dispose();
}
if (this.currentRequestCancellation) {
this.currentRequestCancellation.cancel();
this.currentRequestCancellation.dispose();
}
}
private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
// Check for required dependencies
if (!this.client) {
console.warn('Cline <Language Model API>: No client available for token counting');
return 0;
}
if (!this.currentRequestCancellation) {
console.warn('Cline <Language Model API>: No cancellation token available for token counting');
return 0;
}
// Validate input
if (!text) {
console.debug('Cline <Language Model API>: Empty text provided for token counting');
return 0;
}
try {
// Handle different input types
let tokenCount: number;
if (typeof text === 'string') {
tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
} else if (text instanceof vscode.LanguageModelChatMessage) {
// For chat messages, ensure we have content
if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
console.debug('Cline <Language Model API>: Empty chat message content');
return 0;
}
tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
} else {
console.warn('Cline <Language Model API>: Invalid input type for token counting');
return 0;
}
// Validate the result
if (typeof tokenCount !== 'number') {
console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
return 0;
}
if (tokenCount < 0) {
console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
return 0;
}
return tokenCount;
}
catch (error) {
// Handle specific error types
if (error instanceof vscode.CancellationError) {
console.debug('Cline <Language Model API>: Token counting cancelled by user');
return 0;
}
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);
// Log additional error details if available
if (error instanceof Error && error.stack) {
console.debug('Token counting error stack:', error.stack);
}
return 0; // Fallback to prevent stream interruption
}
}
private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
const systemTokens: number = await this.countTokens(systemPrompt);
const messageTokens: number[] = await Promise.all(
vsCodeLmMessages.map(msg => this.countTokens(msg))
);
return systemTokens + messageTokens.reduce(
(sum: number, tokens: number): number => sum + tokens, 0
);
}
private ensureCleanState(): void {
if (this.currentRequestCancellation) {
this.currentRequestCancellation.cancel();
this.currentRequestCancellation.dispose();
this.currentRequestCancellation = null;
}
}
private async getClient(): Promise<vscode.LanguageModelChat> {
if (!this.client) {
console.debug('Cline <Language Model API>: Getting client with options:', {
vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
hasOptions: !!this.options,
selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
});
try {
// Use default empty selector if none provided to get all available models
const selector = this.options?.vsCodeLmModelSelector || {};
console.debug('Cline <Language Model API>: Creating client with selector:', selector);
this.client = await this.createClient(selector);
} catch (error) {
const message = error instanceof Error ? error.message : 'Unknown error';
console.error('Cline <Language Model API>: Client creation failed:', message);
throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
}
}
return this.client;
}
private cleanTerminalOutput(text: string): string {
if (!text) {
return '';
}
return text
// Normalize line endings
.replace(/\r\n/g, '\n')
.replace(/\r/g, '\n')
// Strip ANSI escape sequences
.replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
.replace(/\x9B[0-?]*[ -/]*[@-~]/g, '') // CSI sequences
// Strip terminal title-setting and other OSC sequences
.replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')
// Strip control characters
.replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')
// Strip VS Code escape sequences
.replace(/\x1B[PD].*?\x1B\\/g, '') // DCS sequences
.replace(/\x1B_.*?\x1B\\/g, '') // APC sequences
.replace(/\x1B\^.*?\x1B\\/g, '') // PM sequences
.replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen
// Strip Windows paths and shell housekeeping output
.replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
.replace(/^;?Cwd=.*$/mg, '')
// Clean up escaped sequences
.replace(/\\x[0-9a-fA-F]{2}/g, '')
.replace(/\\u[0-9a-fA-F]{4}/g, '')
// Final cleanup
.replace(/\n{3,}/g, '\n\n') // Collapse runs of blank lines
.trim();
}
private cleanMessageContent(content: any): any {
if (!content) {
return content;
}
if (typeof content === 'string') {
return this.cleanTerminalOutput(content);
}
if (Array.isArray(content)) {
return content.map(item => this.cleanMessageContent(item));
}
if (typeof content === 'object') {
const cleaned: any = {};
for (const [key, value] of Object.entries(content)) {
cleaned[key] = this.cleanMessageContent(value);
}
return cleaned;
}
return content;
}
/**
* Creates and streams a message using the VS Code Language Model API.
*
* @param systemPrompt - The system prompt to initialize the conversation context
* @param messages - An array of message parameters following the Anthropic message format
*
* @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
*
* @throws {Error} When vsCodeLmModelSelector option is not provided
* @throws {Error} When the response stream encounters an error
*
* @remarks
* This method handles the initialization of the VS Code LM client if not already created,
* converts the messages to VS Code LM format, and streams the response chunks.
* Tool call handling is currently a work in progress.
*/
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
// Ensure clean state before starting a new request
this.ensureCleanState();
const client: vscode.LanguageModelChat = await this.getClient();
// Clean system prompt and messages
const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
const cleanedMessages = messages.map(msg => ({
...msg,
content: this.cleanMessageContent(msg.content)
}));
// Convert Anthropic messages to VS Code LM messages
const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
...convertToVsCodeLmMessages(cleanedMessages),
];
// Initialize cancellation token for the request
this.currentRequestCancellation = new vscode.CancellationTokenSource();
// Calculate input tokens before starting the stream
const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);
// Accumulate the text and count at the end of the stream to reduce token counting overhead.
let accumulatedText: string = '';
try {
// Create the response stream with minimal required options
const requestOptions: vscode.LanguageModelChatRequestOptions = {
justification: `Cline would like to use '${client.name}' from '${client.vendor}'. Click 'Allow' to proceed.`
};
// Note: Tool support is currently provided by the VSCode Language Model API directly
// Extensions can register tools using vscode.lm.registerTool()
const response: vscode.LanguageModelChatResponse = await client.sendRequest(
vsCodeLmMessages,
requestOptions,
this.currentRequestCancellation.token
);
// Consume the stream and handle both text and tool call chunks
for await (const chunk of response.stream) {
if (chunk instanceof vscode.LanguageModelTextPart) {
// Validate text part value
if (typeof chunk.value !== 'string') {
console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
continue;
}
accumulatedText += chunk.value;
yield {
type: "text",
text: chunk.value,
};
} else if (chunk instanceof vscode.LanguageModelToolCallPart) {
try {
// Validate tool call parameters
if (!chunk.name || typeof chunk.name !== 'string') {
console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
continue;
}
if (!chunk.callId || typeof chunk.callId !== 'string') {
console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
continue;
}
// Ensure input is a valid object
if (!chunk.input || typeof chunk.input !== 'object') {
console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
continue;
}
// Convert tool calls to text format with proper error handling
const toolCall = {
type: "tool_call",
name: chunk.name,
arguments: chunk.input,
callId: chunk.callId
};
const toolCallText = JSON.stringify(toolCall);
accumulatedText += toolCallText;
// Log tool call for debugging
console.debug('Cline <Language Model API>: Processing tool call:', {
name: chunk.name,
callId: chunk.callId,
inputSize: JSON.stringify(chunk.input).length
});
yield {
type: "text",
text: toolCallText,
};
} catch (error) {
console.error('Cline <Language Model API>: Failed to process tool call:', error);
// Continue processing other chunks even if one fails
continue;
}
} else {
console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
}
}
// Count tokens in the accumulated text after stream completion
const totalOutputTokens: number = await this.countTokens(accumulatedText);
// Report final usage after stream completion
yield {
type: "usage",
inputTokens: totalInputTokens,
outputTokens: totalOutputTokens,
totalCost: calculateApiCost(
this.getModel().info,
totalInputTokens,
totalOutputTokens
)
};
}
catch (error: unknown) {
this.ensureCleanState();
if (error instanceof vscode.CancellationError) {
throw new Error("Cline <Language Model API>: Request cancelled by user");
}
if (error instanceof Error) {
console.error('Cline <Language Model API>: Stream error details:', {
message: error.message,
stack: error.stack,
name: error.name
});
// Return original error if it's already an Error instance
throw error;
} else if (typeof error === 'object' && error !== null) {
// Handle error-like objects
const errorDetails = JSON.stringify(error, null, 2);
console.error('Cline <Language Model API>: Stream error object:', errorDetails);
throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
} else {
// Fallback for unknown error types
const errorMessage = String(error);
console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
}
}
}
// Return model information based on the current client state
getModel(): { id: string; info: ModelInfo; } {
if (this.client) {
// Validate client properties
const requiredProps = {
id: this.client.id,
vendor: this.client.vendor,
family: this.client.family,
version: this.client.version,
maxInputTokens: this.client.maxInputTokens
};
// Log any missing properties for debugging
for (const [prop, value] of Object.entries(requiredProps)) {
if (!value && value !== 0) {
console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
}
}
// Construct model ID using available information
const modelParts = [
this.client.vendor,
this.client.family,
this.client.version
].filter(Boolean);
const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);
// Build model info with conservative defaults for missing values
const modelInfo: ModelInfo = {
maxTokens: -1, // Unlimited tokens by default
contextWindow: typeof this.client.maxInputTokens === 'number'
? Math.max(0, this.client.maxInputTokens)
: openAiModelInfoSaneDefaults.contextWindow,
supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
supportsPromptCache: true,
inputPrice: 0,
outputPrice: 0,
description: `VSCode Language Model: ${modelId}`
};
return { id: modelId, info: modelInfo };
}
// Fallback when no client is available
const fallbackId = this.options.vsCodeLmModelSelector
? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
: "vscode-lm";
console.debug('Cline <Language Model API>: No client available, using fallback model info');
return {
id: fallbackId,
info: {
...openAiModelInfoSaneDefaults,
description: `VSCode Language Model (Fallback): ${fallbackId}`
}
};
}
async completePrompt(prompt: string): Promise<string> {
try {
const client = await this.getClient();
const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
let result = "";
for await (const chunk of response.stream) {
if (chunk instanceof vscode.LanguageModelTextPart) {
result += chunk.value;
}
}
return result;
} catch (error) {
if (error instanceof Error) {
throw new Error(`VSCode LM completion error: ${error.message}`)
}
throw error
}
}
}
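For the non-streaming path, a short usage sketch (the selector values and import path are illustrative; any vendor/family pair registered with the VS Code LM API would do):

```ts
import { VsCodeLmHandler } from "./vscode-lm"

async function demo(): Promise<void> {
	const handler = new VsCodeLmHandler({
		vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4o" },
	})
	try {
		// completePrompt drains response.stream into a single string and wraps
		// failures as "VSCode LM completion error: ..." as implemented above.
		const text = await handler.completePrompt("Say hello in one sentence.")
		console.log(text)
	} finally {
		// The handler owns a configuration listener and a cancellation source.
		handler.dispose()
	}
}
```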

View File

@@ -0,0 +1,246 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { convertToVsCodeLmMessages, convertToAnthropicRole, convertToAnthropicMessage } from '../vscode-lm-format';
// Mock crypto
const mockCrypto = {
randomUUID: () => 'test-uuid'
};
global.crypto = mockCrypto as any;
// Define types for our mocked classes
interface MockLanguageModelTextPart {
type: 'text';
value: string;
}
interface MockLanguageModelToolCallPart {
type: 'tool_call';
callId: string;
name: string;
input: any;
}
interface MockLanguageModelToolResultPart {
type: 'tool_result';
toolUseId: string;
parts: MockLanguageModelTextPart[];
}
type MockMessageContent = MockLanguageModelTextPart | MockLanguageModelToolCallPart | MockLanguageModelToolResultPart;
interface MockLanguageModelChatMessage {
role: string;
name?: string;
content: MockMessageContent[];
}
// Mock vscode namespace
jest.mock('vscode', () => {
const LanguageModelChatMessageRole = {
Assistant: 'assistant',
User: 'user'
};
class MockLanguageModelTextPart {
type = 'text';
constructor(public value: string) {}
}
class MockLanguageModelToolCallPart {
type = 'tool_call';
constructor(
public callId: string,
public name: string,
public input: any
) {}
}
class MockLanguageModelToolResultPart {
type = 'tool_result';
constructor(
public toolUseId: string,
public parts: MockLanguageModelTextPart[]
) {}
}
return {
LanguageModelChatMessage: {
Assistant: jest.fn((content) => ({
role: LanguageModelChatMessageRole.Assistant,
name: 'assistant',
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
})),
User: jest.fn((content) => ({
role: LanguageModelChatMessageRole.User,
name: 'user',
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
}))
},
LanguageModelChatMessageRole,
LanguageModelTextPart: MockLanguageModelTextPart,
LanguageModelToolCallPart: MockLanguageModelToolCallPart,
LanguageModelToolResultPart: MockLanguageModelToolResultPart
};
});
describe('vscode-lm-format', () => {
describe('convertToVsCodeLmMessages', () => {
it('should convert simple string messages', () => {
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' }
];
const result = convertToVsCodeLmMessages(messages);
expect(result).toHaveLength(2);
expect(result[0].role).toBe('user');
expect((result[0].content[0] as MockLanguageModelTextPart).value).toBe('Hello');
expect(result[1].role).toBe('assistant');
expect((result[1].content[0] as MockLanguageModelTextPart).value).toBe('Hi there');
});
it('should handle complex user messages with tool results', () => {
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user',
content: [
{ type: 'text', text: 'Here is the result:' },
{
type: 'tool_result',
tool_use_id: 'tool-1',
content: 'Tool output'
}
]
}];
const result = convertToVsCodeLmMessages(messages);
expect(result).toHaveLength(1);
expect(result[0].role).toBe('user');
expect(result[0].content).toHaveLength(2);
const [toolResult, textContent] = result[0].content as [MockLanguageModelToolResultPart, MockLanguageModelTextPart];
expect(toolResult.type).toBe('tool_result');
expect(textContent.type).toBe('text');
});
it('should handle complex assistant messages with tool calls', () => {
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'assistant',
content: [
{ type: 'text', text: 'Let me help you with that.' },
{
type: 'tool_use',
id: 'tool-1',
name: 'calculator',
input: { operation: 'add', numbers: [2, 2] }
}
]
}];
const result = convertToVsCodeLmMessages(messages);
expect(result).toHaveLength(1);
expect(result[0].role).toBe('assistant');
expect(result[0].content).toHaveLength(2);
const [toolCall, textContent] = result[0].content as [MockLanguageModelToolCallPart, MockLanguageModelTextPart];
expect(toolCall.type).toBe('tool_call');
expect(textContent.type).toBe('text');
});
it('should handle image blocks with appropriate placeholders', () => {
const messages: Anthropic.Messages.MessageParam[] = [{
role: 'user',
content: [
{ type: 'text', text: 'Look at this:' },
{
type: 'image',
source: {
type: 'base64',
media_type: 'image/png',
data: 'base64data'
}
}
]
}];
const result = convertToVsCodeLmMessages(messages);
expect(result).toHaveLength(1);
const imagePlaceholder = result[0].content[1] as MockLanguageModelTextPart;
expect(imagePlaceholder.value).toContain('[Image (base64): image/png not supported by VSCode LM API]');
});
});
describe('convertToAnthropicRole', () => {
it('should convert assistant role correctly', () => {
const result = convertToAnthropicRole('assistant' as any);
expect(result).toBe('assistant');
});
it('should convert user role correctly', () => {
const result = convertToAnthropicRole('user' as any);
expect(result).toBe('user');
});
it('should return null for unknown roles', () => {
const result = convertToAnthropicRole('unknown' as any);
expect(result).toBeNull();
});
});
describe('convertToAnthropicMessage', () => {
it('should convert assistant message with text content', async () => {
const vsCodeMessage = {
role: 'assistant',
name: 'assistant',
content: [new vscode.LanguageModelTextPart('Hello')]
};
const result = await convertToAnthropicMessage(vsCodeMessage as any);
expect(result.role).toBe('assistant');
expect(result.content).toHaveLength(1);
expect(result.content[0]).toEqual({
type: 'text',
text: 'Hello'
});
expect(result.id).toBe('test-uuid');
});
it('should convert assistant message with tool calls', async () => {
const vsCodeMessage = {
role: 'assistant',
name: 'assistant',
content: [new vscode.LanguageModelToolCallPart(
'call-1',
'calculator',
{ operation: 'add', numbers: [2, 2] }
)]
};
const result = await convertToAnthropicMessage(vsCodeMessage as any);
expect(result.content).toHaveLength(1);
expect(result.content[0]).toEqual({
type: 'tool_use',
id: 'call-1',
name: 'calculator',
input: { operation: 'add', numbers: [2, 2] }
});
expect(result.id).toBe('test-uuid');
});
it('should throw error for non-assistant messages', async () => {
const vsCodeMessage = {
role: 'user',
name: 'user',
content: [new vscode.LanguageModelTextPart('Hello')]
};
await expect(convertToAnthropicMessage(vsCodeMessage as any))
.rejects
.toThrow('Cline <Language Model API>: Only assistant messages are supported.');
});
});
});

View File

@@ -0,0 +1,209 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
/**
* Safely converts a value into a plain object.
*/
function asObjectSafe(value: any): object {
// Handle null/undefined
if (!value) {
return {};
}
try {
// Handle strings that might be JSON
if (typeof value === 'string') {
return JSON.parse(value);
}
// Handle pre-existing objects
if (typeof value === 'object') {
return Object.assign({}, value);
}
return {};
}
catch (error) {
console.warn('Cline <Language Model API>: Failed to parse object:', error);
return {};
}
}
export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];
for (const anthropicMessage of anthropicMessages) {
// Handle simple string messages
if (typeof anthropicMessage.content === "string") {
vsCodeLmMessages.push(
anthropicMessage.role === "assistant"
? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
: vscode.LanguageModelChatMessage.User(anthropicMessage.content)
);
continue;
}
// Handle complex message structures
switch (anthropicMessage.role) {
case "user": {
const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
toolMessages: Anthropic.ToolResultBlockParam[];
}>(
(acc, part) => {
if (part.type === "tool_result") {
acc.toolMessages.push(part);
}
else if (part.type === "text" || part.type === "image") {
acc.nonToolMessages.push(part);
}
return acc;
},
{ nonToolMessages: [], toolMessages: [] },
);
// Process tool messages first then non-tool messages
const contentParts = [
// Convert tool messages to ToolResultParts
...toolMessages.map((toolMessage) => {
// Process tool result content into TextParts
const toolContentParts: vscode.LanguageModelTextPart[] = (
typeof toolMessage.content === "string"
? [new vscode.LanguageModelTextPart(toolMessage.content)]
: (
toolMessage.content?.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart(
`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
);
}
return new vscode.LanguageModelTextPart(part.text);
})
?? [new vscode.LanguageModelTextPart("")]
)
);
return new vscode.LanguageModelToolResultPart(
toolMessage.tool_use_id,
toolContentParts
);
}),
// Convert non-tool messages to TextParts after tool messages
...nonToolMessages.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart(
`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
);
}
return new vscode.LanguageModelTextPart(part.text);
})
];
// Add single user message with all content parts
vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
break;
}
case "assistant": {
const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
toolMessages: Anthropic.ToolUseBlockParam[];
}>(
(acc, part) => {
if (part.type === "tool_use") {
acc.toolMessages.push(part);
}
else if (part.type === "text" || part.type === "image") {
acc.nonToolMessages.push(part);
}
return acc;
},
{ nonToolMessages: [], toolMessages: [] },
);
// Process tool messages first then non-tool messages
const contentParts = [
// Convert tool messages to ToolCallParts first
...toolMessages.map((toolMessage) =>
new vscode.LanguageModelToolCallPart(
toolMessage.id,
toolMessage.name,
asObjectSafe(toolMessage.input)
)
),
// Convert non-tool messages to TextParts after tool messages
...nonToolMessages.map((part) => {
if (part.type === "image") {
return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
}
return new vscode.LanguageModelTextPart(part.text);
})
];
// Add the assistant message to the list of messages
vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
break;
}
}
}
return vsCodeLmMessages;
}
export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
switch (vsCodeLmMessageRole) {
case vscode.LanguageModelChatMessageRole.Assistant:
return "assistant";
case vscode.LanguageModelChatMessageRole.User:
return "user";
default:
return null;
}
}
export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
if (anthropicRole !== "assistant") {
throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
}
return {
id: crypto.randomUUID(),
type: "message",
model: "vscode-lm",
role: anthropicRole,
content: (
vsCodeLmMessage.content
.map((part): Anthropic.ContentBlock | null => {
if (part instanceof vscode.LanguageModelTextPart) {
return {
type: "text",
text: part.value
};
}
if (part instanceof vscode.LanguageModelToolCallPart) {
return {
type: "tool_use",
id: part.callId || crypto.randomUUID(),
name: part.name,
input: asObjectSafe(part.input)
};
}
return null;
})
.filter(
(part): part is Anthropic.ContentBlock => part !== null
)
),
stop_reason: null,
stop_sequence: null,
usage: {
input_tokens: 0,
output_tokens: 0,
}
};
}
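One subtlety in the converter above is ordering: within a converted message, tool blocks are emitted before non-tool blocks, regardless of their order in the Anthropic input. A small sketch of that behavior:

```ts
import { Anthropic } from "@anthropic-ai/sdk"
import { convertToVsCodeLmMessages } from "./vscode-lm-format"

const messages: Anthropic.Messages.MessageParam[] = [
	{
		role: "assistant",
		content: [
			{ type: "text", text: "Let me calculate that." },
			{ type: "tool_use", id: "call-1", name: "calculator", input: { a: 2, b: 2 } },
		],
	},
]

const [assistant] = convertToVsCodeLmMessages(messages)
// assistant.content[0] is the LanguageModelToolCallPart and
// assistant.content[1] the text part, even though the input listed text first.
```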

View File

@@ -93,6 +93,7 @@ type GlobalStateKey =
| "requestDelaySeconds"
| "currentApiConfigName"
| "listApiConfigMeta"
| "vsCodeLmModelSelector"
| "mode"
| "modeApiConfigs"
| "customPrompts"
@@ -571,8 +572,12 @@ export class ClineProvider implements vscode.WebviewViewProvider {
const lmStudioModels = await this.getLmStudioModels(message.text)
this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
break
case "requestVsCodeLmModels":
const vsCodeLmModels = await this.getVsCodeLmModels()
this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
break
case "refreshGlamaModels":
await this.refreshGlamaModels()
break
case "refreshOpenRouterModels":
await this.refreshOpenRouterModels()
@@ -1109,6 +1114,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
} = apiConfiguration
await this.updateGlobalState("apiProvider", apiProvider)
await this.updateGlobalState("apiModelId", apiModelId)
@@ -1140,6 +1146,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("openRouterModelId", openRouterModelId)
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
if (this.cline) {
this.cline.api = buildApiHandler(apiConfiguration)
}
@@ -1210,6 +1217,17 @@ export class ClineProvider implements vscode.WebviewViewProvider {
}
}
// VSCode LM API
private async getVsCodeLmModels() {
try {
const models = await vscode.lm.selectChatModels({});
return models || [];
} catch (error) {
console.error('Error fetching VS Code LM models:', error);
return [];
}
}
// OpenAi
async getOpenAiModels(baseUrl?: string, apiKey?: string) {
@@ -1268,6 +1286,33 @@ export class ClineProvider implements vscode.WebviewViewProvider {
return cacheDir
}
async handleGlamaCallback(code: string) {
let apiKey: string
try {
const response = await axios.post("https://glama.ai/api/gateway/v1/auth/exchange-code", { code })
if (response.data && response.data.apiKey) {
apiKey = response.data.apiKey
} else {
throw new Error("Invalid response from Glama API")
}
} catch (error) {
console.error("Error exchanging code for API key:", error)
throw error
}
const glama: ApiProvider = "glama"
await this.updateGlobalState("apiProvider", glama)
await this.storeSecret("glamaApiKey", apiKey)
await this.postStateToWebview()
if (this.cline) {
this.cline.api = buildApiHandler({
apiProvider: glama,
glamaApiKey: apiKey,
})
}
// await this.postMessageToWebview({ type: "action", action: "settingsButtonClicked" }) // bad UX if the user is on the welcome screen
}
async readGlamaModels(): Promise<Record<string, ModelInfo> | undefined> {
const glamaModelsFilePath = path.join(
await this.ensureCacheDirectoryExists(),
@@ -1742,6 +1787,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
requestDelaySeconds,
currentApiConfigName,
listApiConfigMeta,
vsCodeLmModelSelector,
mode,
modeApiConfigs,
customPrompts,
@@ -1800,6 +1846,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
this.getGlobalState("requestDelaySeconds") as Promise<number | undefined>,
this.getGlobalState("currentApiConfigName") as Promise<string | undefined>,
this.getGlobalState("listApiConfigMeta") as Promise<ApiConfigMeta[] | undefined>,
this.getGlobalState("vsCodeLmModelSelector") as Promise<vscode.LanguageModelChatSelector | undefined>,
this.getGlobalState("mode") as Promise<Mode | undefined>,
this.getGlobalState("modeApiConfigs") as Promise<Record<Mode, string> | undefined>,
this.getGlobalState("customPrompts") as Promise<CustomPrompts | undefined>,
@@ -1852,6 +1899,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
},
lastShownAnnouncementId,
customInstructions,
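From the webview's side, the message pair added here behaves roughly as follows (the `acquireVsCodeApi` wiring is standard webview boilerplate and not part of this diff; only the message types are):

```ts
// Inside the webview script.
const vscodeApi = acquireVsCodeApi()

// Webview -> extension: ask ClineProvider for the available VS Code LM models.
vscodeApi.postMessage({ type: "requestVsCodeLmModels" })

// Extension -> webview: the provider replies with a "vsCodeLmModels" message
// carrying the result of vscode.lm.selectChatModels({}), or [] on error.
window.addEventListener("message", (event) => {
	if (event.data?.type === "vsCodeLmModels") {
		console.log("Available VS Code LM models:", event.data.vsCodeLmModels)
	}
})
```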

View File

@@ -7,7 +7,7 @@ The Cline extension exposes an API that can be used by other extensions. To use
3. Get access to the API with the following code:
```ts
const clineExtension = vscode.extensions.getExtension<ClineAPI>("saoudrizwan.claude-dev")
const clineExtension = vscode.extensions.getExtension<ClineAPI>("rooveterinaryinc.roo-cline")
if (!clineExtension?.isActive) {
throw new Error("Cline extension is not activated")
@@ -44,11 +44,11 @@ The Cline extension exposes an API that can be used by other extensions. To use
}
```
**Note:** To ensure that the `saoudrizwan.claude-dev` extension is activated before your extension, add it to the `extensionDependencies` in your `package.json`:
**Note:** To ensure that the `rooveterinaryinc.roo-cline` extension is activated before your extension, add it to the `extensionDependencies` in your `package.json`:
```json
"extensionDependencies": [
"saoudrizwan.claude-dev"
"rooveterinaryinc.roo-cline"
]
```

View File

@@ -139,6 +139,14 @@ export function activate(context: vscode.ExtensionContext) {
return
}
switch (path) {
case "/glama": {
const code = query.get("code")
if (code) {
await visibleProvider.handleGlamaCallback(code)
}
break
}
case "/openrouter": {
const code = query.get("code")
if (code) {

View File

@@ -141,5 +141,5 @@ export function mergeJson(
}
function getExtensionUri(): vscode.Uri {
return vscode.extensions.getExtension("saoudrizwan.claude-dev")!.extensionUri
return vscode.extensions.getExtension("rooveterinaryinc.roo-cline")!.extensionUri
}

View File

@@ -25,6 +25,9 @@ export interface ExtensionMessage {
| "enhancedPrompt"
| "commitSearchResults"
| "listApiConfig"
| "vsCodeLmModels"
| "vsCodeLmApiAvailable"
| "requestVsCodeLmModels"
| "updatePrompt"
| "systemPrompt"
text?: string
@@ -40,6 +43,7 @@ export interface ExtensionMessage {
images?: string[]
ollamaModels?: string[]
lmStudioModels?: string[]
vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
filePaths?: string[]
partialMessage?: ClineMessage
glamaModels?: Record<string, ModelInfo>

View File

@@ -61,9 +61,11 @@ export interface WebviewMessage {
| "terminalOutputLineLimit"
| "mcpEnabled"
| "searchCommits"
| "refreshGlamaModels"
| "alwaysApproveResubmit"
| "requestDelaySeconds"
| "setApiConfigPassword"
| "requestVsCodeLmModels"
| "mode"
| "updatePrompt"
| "updateEnhancedPrompt"

View File

@@ -0,0 +1,56 @@
import { checkExistKey } from '../checkExistApiConfig';
import { ApiConfiguration } from '../api';
describe('checkExistKey', () => {
it('should return false for undefined config', () => {
expect(checkExistKey(undefined)).toBe(false);
});
it('should return false for empty config', () => {
const config: ApiConfiguration = {};
expect(checkExistKey(config)).toBe(false);
});
it('should return true when one key is defined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key'
};
expect(checkExistKey(config)).toBe(true);
});
it('should return true when multiple keys are defined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key',
glamaApiKey: 'glama-key',
openRouterApiKey: 'openrouter-key'
};
expect(checkExistKey(config)).toBe(true);
});
it('should return true when only non-key fields are undefined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key',
apiProvider: undefined,
anthropicBaseUrl: undefined
};
expect(checkExistKey(config)).toBe(true);
});
it('should return false when all key fields are undefined', () => {
const config: ApiConfiguration = {
apiKey: undefined,
glamaApiKey: undefined,
openRouterApiKey: undefined,
awsRegion: undefined,
vertexProjectId: undefined,
openAiApiKey: undefined,
ollamaModelId: undefined,
lmStudioModelId: undefined,
geminiApiKey: undefined,
openAiNativeApiKey: undefined,
deepSeekApiKey: undefined,
vsCodeLmModelSelector: undefined
};
expect(checkExistKey(config)).toBe(false);
});
});

View File

@@ -0,0 +1,44 @@
import { stringifyVsCodeLmModelSelector, SELECTOR_SEPARATOR } from '../vsCodeSelectorUtils';
import { LanguageModelChatSelector } from 'vscode';
describe('vsCodeSelectorUtils', () => {
describe('stringifyVsCodeLmModelSelector', () => {
it('should join all defined selector properties with separator', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor',
family: 'test-family',
version: 'v1',
id: 'test-id'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor/test-family/v1/test-id');
});
it('should skip undefined properties', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor',
family: 'test-family'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor/test-family');
});
it('should handle empty selector', () => {
const selector: LanguageModelChatSelector = {};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('');
});
it('should handle selector with only one property', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor');
});
});
});

View File

@@ -1,3 +1,5 @@
import * as vscode from 'vscode';
export type ApiProvider =
| "anthropic"
| "glama"
@@ -10,11 +12,13 @@ export type ApiProvider =
| "gemini"
| "openai-native"
| "deepseek"
| "vscode-lm"
export interface ApiHandlerOptions {
apiModelId?: string
apiKey?: string // anthropic
anthropicBaseUrl?: string
vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
glamaModelId?: string
glamaModelInfo?: ModelInfo
glamaApiKey?: string
@@ -58,7 +62,7 @@ export type ApiConfiguration = ApiHandlerOptions & {
export interface ModelInfo {
maxTokens?: number
contextWindow?: number
contextWindow: number
supportsImages?: boolean
supportsComputerUse?: boolean
supportsPromptCache: boolean // this value is hardcoded for now

View File

@@ -13,7 +13,8 @@ export function checkExistKey(config: ApiConfiguration | undefined) {
config.lmStudioModelId,
config.geminiApiKey,
config.openAiNativeApiKey,
config.deepSeekApiKey
config.deepSeekApiKey,
config.vsCodeLmModelSelector,
].some((key) => key !== undefined)
: false;
}
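With `vsCodeLmModelSelector` added to the key list, a configuration that sets only a selector now counts as configured:

```ts
import { checkExistKey } from "./checkExistApiConfig"

// True after this change; previously the selector alone was ignored.
checkExistKey({ vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4o" } })

// Still false: nothing is configured.
checkExistKey({})
```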

View File

@@ -0,0 +1,14 @@
import { LanguageModelChatSelector } from 'vscode';
export const SELECTOR_SEPARATOR = '/';
export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
return [
selector.vendor,
selector.family,
selector.version,
selector.id
]
.filter(Boolean)
.join(SELECTOR_SEPARATOR);
}
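Usage matches the tests earlier in this commit: defined fields are joined with `/`, undefined fields drop out:

```ts
import { stringifyVsCodeLmModelSelector } from "./vsCodeSelectorUtils"

stringifyVsCodeLmModelSelector({ vendor: "copilot", family: "gpt-4o" }) // "copilot/gpt-4o"
stringifyVsCodeLmModelSelector({ vendor: "copilot" }) // "copilot"
stringifyVsCodeLmModelSelector({}) // ""
```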