Mirror of https://github.com/pacnpal/Roo-Code.git, synced 2025-12-20 04:11:10 -05:00
Merge pull request #367 from RooVetGit/vscode-lm-provider
Add VSCode-LM as a provider
@@ -11,6 +11,7 @@ import { LmStudioHandler } from "./providers/lmstudio"
import { GeminiHandler } from "./providers/gemini"
import { OpenAiNativeHandler } from "./providers/openai-native"
import { DeepSeekHandler } from "./providers/deepseek"
import { VsCodeLmHandler } from "./providers/vscode-lm"
import { ApiStream } from "./transform/stream"

export interface SingleCompletionHandler {
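For orientation only: the hunk above adds the new handler's import alongside the other providers. A minimal sketch of how such a handler is typically selected by the provider factory; the buildApiHandler shape, the "vscode-lm" provider id, and the fallback case are assumptions, not shown in this diff:

// Hypothetical wiring sketch (names assumed, not part of this diff):
// a provider factory returning the new handler for the "vscode-lm" id.
export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
    const { apiProvider, ...options } = configuration;
    switch (apiProvider) {
        case "vscode-lm":
            return new VsCodeLmHandler(options);
        // ...existing provider cases (anthropic, gemini, deepseek, ...) unchanged...
        default:
            return new AnthropicHandler(options);
    }
}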
289 src/api/providers/__tests__/vscode-lm.test.ts Normal file
@@ -0,0 +1,289 @@
import * as vscode from 'vscode';
import { VsCodeLmHandler } from '../vscode-lm';
import { ApiHandlerOptions } from '../../../shared/api';
import { Anthropic } from '@anthropic-ai/sdk';

// Mock vscode namespace
jest.mock('vscode', () => {
    class MockLanguageModelTextPart {
        type = 'text';
        constructor(public value: string) {}
    }

    class MockLanguageModelToolCallPart {
        type = 'tool_call';
        constructor(
            public callId: string,
            public name: string,
            public input: any
        ) {}
    }

    return {
        workspace: {
            onDidChangeConfiguration: jest.fn((callback) => ({
                dispose: jest.fn()
            }))
        },
        CancellationTokenSource: jest.fn(() => ({
            token: {
                isCancellationRequested: false,
                onCancellationRequested: jest.fn()
            },
            cancel: jest.fn(),
            dispose: jest.fn()
        })),
        CancellationError: class CancellationError extends Error {
            constructor() {
                super('Operation cancelled');
                this.name = 'CancellationError';
            }
        },
        LanguageModelChatMessage: {
            Assistant: jest.fn((content) => ({
                role: 'assistant',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            })),
            User: jest.fn((content) => ({
                role: 'user',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            }))
        },
        LanguageModelTextPart: MockLanguageModelTextPart,
        LanguageModelToolCallPart: MockLanguageModelToolCallPart,
        lm: {
            selectChatModels: jest.fn()
        }
    };
});

const mockLanguageModelChat = {
    id: 'test-model',
    name: 'Test Model',
    vendor: 'test-vendor',
    family: 'test-family',
    version: '1.0',
    maxInputTokens: 4096,
    sendRequest: jest.fn(),
    countTokens: jest.fn()
};

describe('VsCodeLmHandler', () => {
    let handler: VsCodeLmHandler;
    const defaultOptions: ApiHandlerOptions = {
        vsCodeLmModelSelector: {
            vendor: 'test-vendor',
            family: 'test-family'
        }
    };

    beforeEach(() => {
        jest.clearAllMocks();
        handler = new VsCodeLmHandler(defaultOptions);
    });

    afterEach(() => {
        handler.dispose();
    });

    describe('constructor', () => {
        it('should initialize with provided options', () => {
            expect(handler).toBeDefined();
            expect(vscode.workspace.onDidChangeConfiguration).toHaveBeenCalled();
        });

        it('should handle configuration changes', () => {
            const callback = (vscode.workspace.onDidChangeConfiguration as jest.Mock).mock.calls[0][0];
            callback({ affectsConfiguration: () => true });
            // Should reset client when config changes
            expect(handler['client']).toBeNull();
        });
    });

    describe('createClient', () => {
        it('should create client with selector', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            const client = await handler['createClient']({
                vendor: 'test-vendor',
                family: 'test-family'
            });

            expect(client).toBeDefined();
            expect(client.id).toBe('test-model');
            expect(vscode.lm.selectChatModels).toHaveBeenCalledWith({
                vendor: 'test-vendor',
                family: 'test-family'
            });
        });

        it('should return default client when no models available', async () => {
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([]);

            const client = await handler['createClient']({});

            expect(client).toBeDefined();
            expect(client.id).toBe('default-lm');
            expect(client.vendor).toBe('vscode');
        });
    });

    describe('createMessage', () => {
        beforeEach(() => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
            mockLanguageModelChat.countTokens.mockResolvedValue(10);
        });

        it('should stream text responses', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Hello'
            }];

            const responseText = 'Hello! How can I help you?';
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelTextPart(responseText);
                    return;
                })(),
                text: (async function* () {
                    yield responseText;
                    return;
                })()
            });

            const stream = handler.createMessage(systemPrompt, messages);
            const chunks = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks).toHaveLength(2); // Text chunk + usage chunk
            expect(chunks[0]).toEqual({
                type: 'text',
                text: responseText
            });
            expect(chunks[1]).toMatchObject({
                type: 'usage',
                inputTokens: expect.any(Number),
                outputTokens: expect.any(Number)
            });
        });

        it('should handle tool calls', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Calculate 2+2'
            }];

            const toolCallData = {
                name: 'calculator',
                arguments: { operation: 'add', numbers: [2, 2] },
                callId: 'call-1'
            };

            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelToolCallPart(
                        toolCallData.callId,
                        toolCallData.name,
                        toolCallData.arguments
                    );
                    return;
                })(),
                text: (async function* () {
                    yield JSON.stringify({ type: 'tool_call', ...toolCallData });
                    return;
                })()
            });

            const stream = handler.createMessage(systemPrompt, messages);
            const chunks = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks).toHaveLength(2); // Tool call chunk + usage chunk
            expect(chunks[0]).toEqual({
                type: 'text',
                text: JSON.stringify({ type: 'tool_call', ...toolCallData })
            });
        });

        it('should handle errors', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Hello'
            }];

            mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('API Error'));

            await expect(async () => {
                const stream = handler.createMessage(systemPrompt, messages);
                for await (const _ of stream) {
                    // consume stream
                }
            }).rejects.toThrow('API Error');
        });
    });

    describe('getModel', () => {
        it('should return model info when client exists', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            // Initialize client
            await handler['getClient']();

            const model = handler.getModel();
            expect(model.id).toBe('test-model');
            expect(model.info).toBeDefined();
            expect(model.info.contextWindow).toBe(4096);
        });

        it('should return fallback model info when no client exists', () => {
            const model = handler.getModel();
            expect(model.id).toBe('test-vendor/test-family');
            expect(model.info).toBeDefined();
        });
    });

    describe('completePrompt', () => {
        it('should complete single prompt', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            const responseText = 'Completed text';
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelTextPart(responseText);
                    return;
                })(),
                text: (async function* () {
                    yield responseText;
                    return;
                })()
            });

            const result = await handler.completePrompt('Test prompt');
            expect(result).toBe(responseText);
            expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled();
        });

        it('should handle errors during completion', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('Completion failed'));

            await expect(handler.completePrompt('Test prompt'))
                .rejects
                .toThrow('VSCode LM completion error: Completion failed');
        });
    });
});
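A note on running this suite: the real 'vscode' module only exists inside the editor process, so the tests above replace it entirely via jest.mock('vscode', factory). If the module id cannot resolve at all in a given setup, an alternative (assumed here, not part of this diff) is to map it to a local stub:

// Hypothetical jest.config.ts fragment; the stub path is an assumption.
import type { Config } from 'jest';

const config: Config = {
    preset: 'ts-jest',
    testEnvironment: 'node',
    moduleNameMapper: {
        '^vscode$': '<rootDir>/src/__mocks__/vscode.ts'
    }
};

export default config;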
564 src/api/providers/vscode-lm.ts Normal file
@@ -0,0 +1,564 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { ApiHandler, SingleCompletionHandler } from "../";
import { calculateApiCost } from "../../utils/cost";
import { ApiStream } from "../transform/stream";
import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format";
import { SELECTOR_SEPARATOR, stringifyVsCodeLmModelSelector } from "../../shared/vsCodeSelectorUtils";
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api";

/**
 * Handles interaction with VS Code's Language Model API for chat-based operations.
 * This handler implements the ApiHandler interface to provide VS Code LM specific functionality.
 *
 * @implements {ApiHandler}
 *
 * @remarks
 * The handler manages a VS Code language model chat client and provides methods to:
 * - Create and manage chat client instances
 * - Stream messages using VS Code's Language Model API
 * - Retrieve model information
 *
 * @example
 * ```typescript
 * const options = {
 *   vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" }
 * };
 * const handler = new VsCodeLmHandler(options);
 *
 * // Stream a conversation
 * const systemPrompt = "You are a helpful assistant";
 * const messages = [{ role: "user", content: "Hello!" }];
 * for await (const chunk of handler.createMessage(systemPrompt, messages)) {
 *   console.log(chunk);
 * }
 * ```
 */
export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {

    private options: ApiHandlerOptions;
    private client: vscode.LanguageModelChat | null;
    private disposable: vscode.Disposable | null;
    private currentRequestCancellation: vscode.CancellationTokenSource | null;

    constructor(options: ApiHandlerOptions) {
        this.options = options;
        this.client = null;
        this.disposable = null;
        this.currentRequestCancellation = null;

        try {
            // Listen for model changes and reset client
            this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
                if (event.affectsConfiguration('lm')) {
                    try {
                        this.client = null;
                        this.ensureCleanState();
                    }
                    catch (error) {
                        console.error('Error during configuration change cleanup:', error);
                    }
                }
            });
        }
        catch (error) {
            // Ensure cleanup if constructor fails
            this.dispose();

            throw new Error(
                `Cline <Language Model API>: Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
            );
        }
    }

    /**
     * Creates a language model chat client based on the provided selector.
     *
     * @param selector - Selector criteria to filter language model chat instances
     * @returns Promise resolving to the first matching language model chat instance,
     *          or a minimal fallback model when no models match the selector
     * @throws Error when the Language Model API fails while selecting models
     *
     * @example
     * const selector = { vendor: "copilot", family: "gpt-4o" };
     * const chatClient = await createClient(selector);
     */
    async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
        try {
            const models = await vscode.lm.selectChatModels(selector);

            // Use first available model or create a minimal model object
            if (models && Array.isArray(models) && models.length > 0) {
                return models[0];
            }

            // Create a minimal model if no models are available
            return {
                id: 'default-lm',
                name: 'Default Language Model',
                vendor: 'vscode',
                family: 'lm',
                version: '1.0',
                maxInputTokens: 8192,
                sendRequest: async (messages, options, token) => {
                    // Provide a minimal implementation
                    return {
                        stream: (async function* () {
                            yield new vscode.LanguageModelTextPart(
                                "Language model functionality is limited. Please check VS Code configuration."
                            );
                        })(),
                        text: (async function* () {
                            yield "Language model functionality is limited. Please check VS Code configuration.";
                        })()
                    };
                },
                countTokens: async () => 0
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
            throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
        }
    }

    dispose(): void {
        if (this.disposable) {
            this.disposable.dispose();
        }

        if (this.currentRequestCancellation) {
            this.currentRequestCancellation.cancel();
            this.currentRequestCancellation.dispose();
        }
    }

    private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
        // Check for required dependencies
        if (!this.client) {
            console.warn('Cline <Language Model API>: No client available for token counting');
            return 0;
        }

        if (!this.currentRequestCancellation) {
            console.warn('Cline <Language Model API>: No cancellation token available for token counting');
            return 0;
        }

        // Validate input
        if (!text) {
            console.debug('Cline <Language Model API>: Empty text provided for token counting');
            return 0;
        }

        try {
            // Handle different input types
            let tokenCount: number;

            if (typeof text === 'string') {
                tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
            } else if (text instanceof vscode.LanguageModelChatMessage) {
                // For chat messages, ensure we have content
                if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
                    console.debug('Cline <Language Model API>: Empty chat message content');
                    return 0;
                }
                tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
            } else {
                console.warn('Cline <Language Model API>: Invalid input type for token counting');
                return 0;
            }

            // Validate the result
            if (typeof tokenCount !== 'number') {
                console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
                return 0;
            }

            if (tokenCount < 0) {
                console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
                return 0;
            }

            return tokenCount;
        }
        catch (error) {
            // Handle specific error types
            if (error instanceof vscode.CancellationError) {
                console.debug('Cline <Language Model API>: Token counting cancelled by user');
                return 0;
            }

            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
            console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);

            // Log additional error details if available
            if (error instanceof Error && error.stack) {
                console.debug('Token counting error stack:', error.stack);
            }

            return 0; // Fallback to prevent stream interruption
        }
    }

    private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
        const systemTokens: number = await this.countTokens(systemPrompt);

        const messageTokens: number[] = await Promise.all(
            vsCodeLmMessages.map(msg => this.countTokens(msg))
        );

        return systemTokens + messageTokens.reduce(
            (sum: number, tokens: number): number => sum + tokens, 0
        );
    }

    private ensureCleanState(): void {
        if (this.currentRequestCancellation) {
            this.currentRequestCancellation.cancel();
            this.currentRequestCancellation.dispose();
            this.currentRequestCancellation = null;
        }
    }

    private async getClient(): Promise<vscode.LanguageModelChat> {
        if (!this.client) {
            console.debug('Cline <Language Model API>: Getting client with options:', {
                vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
                hasOptions: !!this.options,
                selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
            });

            try {
                // Use default empty selector if none provided to get all available models
                const selector = this.options?.vsCodeLmModelSelector || {};
                console.debug('Cline <Language Model API>: Creating client with selector:', selector);
                this.client = await this.createClient(selector);
            } catch (error) {
                const message = error instanceof Error ? error.message : 'Unknown error';
                console.error('Cline <Language Model API>: Client creation failed:', message);
                throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
            }
        }

        return this.client;
    }

    private cleanTerminalOutput(text: string): string {
        if (!text) {
            return '';
        }

        return text
            // Normalize line endings
            .replace(/\r\n/g, '\n')
            .replace(/\r/g, '\n')

            // Remove ANSI escape sequences
            .replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
            .replace(/\x9B[0-?]*[ -/]*[@-~]/g, '') // CSI sequences

            // Remove terminal-title updates and other OSC sequences
            .replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')

            // Remove control characters
            .replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')

            // Remove VS Code escape sequences
            .replace(/\x1B[PD].*?\x1B\\/g, '') // DCS sequences
            .replace(/\x1B_.*?\x1B\\/g, '') // APC sequences
            .replace(/\x1B\^.*?\x1B\\/g, '') // PM sequences
            .replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen

            // Remove Windows paths and shell-integration noise
            .replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
            .replace(/^;?Cwd=.*$/mg, '')

            // Strip escaped byte and unicode sequences
            .replace(/\\x[0-9a-fA-F]{2}/g, '')
            .replace(/\\u[0-9a-fA-F]{4}/g, '')

            // Final cleanup: collapse runs of blank lines
            .replace(/\n{3,}/g, '\n\n')
            .trim();
    }

    private cleanMessageContent(content: any): any {
        if (!content) {
            return content;
        }

        if (typeof content === 'string') {
            return this.cleanTerminalOutput(content);
        }

        if (Array.isArray(content)) {
            return content.map(item => this.cleanMessageContent(item));
        }

        if (typeof content === 'object') {
            const cleaned: any = {};
            for (const [key, value] of Object.entries(content)) {
                cleaned[key] = this.cleanMessageContent(value);
            }
            return cleaned;
        }

        return content;
    }

    /**
     * Creates and streams a message using the VS Code Language Model API.
     *
     * @param systemPrompt - The system prompt to initialize the conversation context
     * @param messages - An array of message parameters following the Anthropic message format
     *
     * @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
     *
     * @throws {Error} When vsCodeLmModelSelector option is not provided
     * @throws {Error} When the response stream encounters an error
     *
     * @remarks
     * This method handles the initialization of the VS Code LM client if not already created,
     * converts the messages to VS Code LM format, and streams the response chunks.
     * Tool call handling is currently a work in progress.
     */
    async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
        // Ensure clean state before starting a new request
        this.ensureCleanState();
        const client: vscode.LanguageModelChat = await this.getClient();

        // Clean system prompt and messages
        const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
        const cleanedMessages = messages.map(msg => ({
            ...msg,
            content: this.cleanMessageContent(msg.content)
        }));

        // Convert Anthropic messages to VS Code LM messages
        const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
            vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
            ...convertToVsCodeLmMessages(cleanedMessages),
        ];

        // Initialize cancellation token for the request
        this.currentRequestCancellation = new vscode.CancellationTokenSource();

        // Calculate input tokens before starting the stream
        const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);

        // Accumulate the text and count at the end of the stream to reduce token counting overhead.
        let accumulatedText: string = '';

        try {
            // Create the response stream with minimal required options
            const requestOptions: vscode.LanguageModelChatRequestOptions = {
                justification: `Cline would like to use '${client.name}' from '${client.vendor}'. Click 'Allow' to proceed.`
            };

            // Note: Tool support is currently provided by the VSCode Language Model API directly.
            // Extensions can register tools using vscode.lm.registerTool() (see the sketch after this file).

            const response: vscode.LanguageModelChatResponse = await client.sendRequest(
                vsCodeLmMessages,
                requestOptions,
                this.currentRequestCancellation.token
            );

            // Consume the stream and handle both text and tool call chunks
            for await (const chunk of response.stream) {
                if (chunk instanceof vscode.LanguageModelTextPart) {
                    // Validate text part value
                    if (typeof chunk.value !== 'string') {
                        console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
                        continue;
                    }

                    accumulatedText += chunk.value;
                    yield {
                        type: "text",
                        text: chunk.value,
                    };
                } else if (chunk instanceof vscode.LanguageModelToolCallPart) {
                    try {
                        // Validate tool call parameters
                        if (!chunk.name || typeof chunk.name !== 'string') {
                            console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
                            continue;
                        }

                        if (!chunk.callId || typeof chunk.callId !== 'string') {
                            console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
                            continue;
                        }

                        // Ensure input is a valid object
                        if (!chunk.input || typeof chunk.input !== 'object') {
                            console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
                            continue;
                        }

                        // Convert tool calls to text format with proper error handling
                        const toolCall = {
                            type: "tool_call",
                            name: chunk.name,
                            arguments: chunk.input,
                            callId: chunk.callId
                        };

                        const toolCallText = JSON.stringify(toolCall);
                        accumulatedText += toolCallText;

                        // Log tool call for debugging
                        console.debug('Cline <Language Model API>: Processing tool call:', {
                            name: chunk.name,
                            callId: chunk.callId,
                            inputSize: JSON.stringify(chunk.input).length
                        });

                        yield {
                            type: "text",
                            text: toolCallText,
                        };
                    } catch (error) {
                        console.error('Cline <Language Model API>: Failed to process tool call:', error);
                        // Continue processing other chunks even if one fails
                        continue;
                    }
                } else {
                    console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
                }
            }

            // Count tokens in the accumulated text after stream completion
            const totalOutputTokens: number = await this.countTokens(accumulatedText);

            // Report final usage after stream completion
            yield {
                type: "usage",
                inputTokens: totalInputTokens,
                outputTokens: totalOutputTokens,
                totalCost: calculateApiCost(
                    this.getModel().info,
                    totalInputTokens,
                    totalOutputTokens
                )
            };
        }
        catch (error: unknown) {
            this.ensureCleanState();

            if (error instanceof vscode.CancellationError) {
                throw new Error("Cline <Language Model API>: Request cancelled by user");
            }

            if (error instanceof Error) {
                console.error('Cline <Language Model API>: Stream error details:', {
                    message: error.message,
                    stack: error.stack,
                    name: error.name
                });

                // Rethrow the original error if it's already an Error instance
                throw error;
            } else if (typeof error === 'object' && error !== null) {
                // Handle error-like objects
                const errorDetails = JSON.stringify(error, null, 2);
                console.error('Cline <Language Model API>: Stream error object:', errorDetails);
                throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
            } else {
                // Fallback for unknown error types
                const errorMessage = String(error);
                console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
                throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
            }
        }
    }

    // Return model information based on the current client state
    getModel(): { id: string; info: ModelInfo; } {
        if (this.client) {
            // Validate client properties
            const requiredProps = {
                id: this.client.id,
                vendor: this.client.vendor,
                family: this.client.family,
                version: this.client.version,
                maxInputTokens: this.client.maxInputTokens
            };

            // Log any missing properties for debugging
            for (const [prop, value] of Object.entries(requiredProps)) {
                if (!value && value !== 0) {
                    console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
                }
            }

            // Construct model ID using available information
            const modelParts = [
                this.client.vendor,
                this.client.family,
                this.client.version
            ].filter(Boolean);

            const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);

            // Build model info with conservative defaults for missing values
            const modelInfo: ModelInfo = {
                maxTokens: -1, // Unlimited tokens by default
                contextWindow: typeof this.client.maxInputTokens === 'number'
                    ? Math.max(0, this.client.maxInputTokens)
                    : openAiModelInfoSaneDefaults.contextWindow,
                supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
                supportsPromptCache: true,
                inputPrice: 0,
                outputPrice: 0,
                description: `VSCode Language Model: ${modelId}`
            };

            return { id: modelId, info: modelInfo };
        }

        // Fallback when no client is available
        const fallbackId = this.options.vsCodeLmModelSelector
            ? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
            : "vscode-lm";

        console.debug('Cline <Language Model API>: No client available, using fallback model info');

        return {
            id: fallbackId,
            info: {
                ...openAiModelInfoSaneDefaults,
                description: `VSCode Language Model (Fallback): ${fallbackId}`
            }
        };
    }

    async completePrompt(prompt: string): Promise<string> {
        try {
            const client = await this.getClient();
            const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
            let result = "";
            for await (const chunk of response.stream) {
                if (chunk instanceof vscode.LanguageModelTextPart) {
                    result += chunk.value;
                }
            }
            return result;
        } catch (error) {
            if (error instanceof Error) {
                throw new Error(`VSCode LM completion error: ${error.message}`);
            }
            throw error;
        }
    }
}
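As referenced by the note inside createMessage, tool support comes from the Language Model API itself rather than from this handler. A hedged sketch of what registering a tool from an extension can look like; the tool name and input shape are illustrative, and a real tool must also be declared under contributes.languageModelTools in package.json:

import * as vscode from 'vscode';

// Illustrative only (not part of this diff): registers a trivial
// "calculator" tool with the Language Model API via vscode.lm.registerTool().
export function activate(context: vscode.ExtensionContext) {
    context.subscriptions.push(
        vscode.lm.registerTool('calculator', {
            async invoke(options, _token) {
                // Input shape is an assumption matching the tests above.
                const { operation, numbers } = options.input as { operation: string; numbers: number[] };
                const result = operation === 'add' ? numbers.reduce((a, b) => a + b, 0) : NaN;
                return new vscode.LanguageModelToolResult([
                    new vscode.LanguageModelTextPart(String(result))
                ]);
            }
        })
    );
}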
246 src/api/transform/__tests__/vscode-lm-format.test.ts Normal file
@@ -0,0 +1,246 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { convertToVsCodeLmMessages, convertToAnthropicRole, convertToAnthropicMessage } from '../vscode-lm-format';

// Mock crypto
const mockCrypto = {
    randomUUID: () => 'test-uuid'
};
global.crypto = mockCrypto as any;

// Define types for our mocked classes
interface MockLanguageModelTextPart {
    type: 'text';
    value: string;
}

interface MockLanguageModelToolCallPart {
    type: 'tool_call';
    callId: string;
    name: string;
    input: any;
}

interface MockLanguageModelToolResultPart {
    type: 'tool_result';
    toolUseId: string;
    parts: MockLanguageModelTextPart[];
}

type MockMessageContent = MockLanguageModelTextPart | MockLanguageModelToolCallPart | MockLanguageModelToolResultPart;

interface MockLanguageModelChatMessage {
    role: string;
    name?: string;
    content: MockMessageContent[];
}

// Mock vscode namespace
jest.mock('vscode', () => {
    const LanguageModelChatMessageRole = {
        Assistant: 'assistant',
        User: 'user'
    };

    class MockLanguageModelTextPart {
        type = 'text';
        constructor(public value: string) {}
    }

    class MockLanguageModelToolCallPart {
        type = 'tool_call';
        constructor(
            public callId: string,
            public name: string,
            public input: any
        ) {}
    }

    class MockLanguageModelToolResultPart {
        type = 'tool_result';
        constructor(
            public toolUseId: string,
            public parts: MockLanguageModelTextPart[]
        ) {}
    }

    return {
        LanguageModelChatMessage: {
            Assistant: jest.fn((content) => ({
                role: LanguageModelChatMessageRole.Assistant,
                name: 'assistant',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            })),
            User: jest.fn((content) => ({
                role: LanguageModelChatMessageRole.User,
                name: 'user',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            }))
        },
        LanguageModelChatMessageRole,
        LanguageModelTextPart: MockLanguageModelTextPart,
        LanguageModelToolCallPart: MockLanguageModelToolCallPart,
        LanguageModelToolResultPart: MockLanguageModelToolResultPart
    };
});

describe('vscode-lm-format', () => {
    describe('convertToVsCodeLmMessages', () => {
        it('should convert simple string messages', () => {
            const messages: Anthropic.Messages.MessageParam[] = [
                { role: 'user', content: 'Hello' },
                { role: 'assistant', content: 'Hi there' }
            ];

            const result = convertToVsCodeLmMessages(messages);

            expect(result).toHaveLength(2);
            expect(result[0].role).toBe('user');
            expect((result[0].content[0] as MockLanguageModelTextPart).value).toBe('Hello');
            expect(result[1].role).toBe('assistant');
            expect((result[1].content[0] as MockLanguageModelTextPart).value).toBe('Hi there');
        });

        it('should handle complex user messages with tool results', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user',
                content: [
                    { type: 'text', text: 'Here is the result:' },
                    {
                        type: 'tool_result',
                        tool_use_id: 'tool-1',
                        content: 'Tool output'
                    }
                ]
            }];

            const result = convertToVsCodeLmMessages(messages);

            expect(result).toHaveLength(1);
            expect(result[0].role).toBe('user');
            expect(result[0].content).toHaveLength(2);
            const [toolResult, textContent] = result[0].content as [MockLanguageModelToolResultPart, MockLanguageModelTextPart];
            expect(toolResult.type).toBe('tool_result');
            expect(textContent.type).toBe('text');
        });

        it('should handle complex assistant messages with tool calls', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'assistant',
                content: [
                    { type: 'text', text: 'Let me help you with that.' },
                    {
                        type: 'tool_use',
                        id: 'tool-1',
                        name: 'calculator',
                        input: { operation: 'add', numbers: [2, 2] }
                    }
                ]
            }];

            const result = convertToVsCodeLmMessages(messages);

            expect(result).toHaveLength(1);
            expect(result[0].role).toBe('assistant');
            expect(result[0].content).toHaveLength(2);
            const [toolCall, textContent] = result[0].content as [MockLanguageModelToolCallPart, MockLanguageModelTextPart];
            expect(toolCall.type).toBe('tool_call');
            expect(textContent.type).toBe('text');
        });

        it('should handle image blocks with appropriate placeholders', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user',
                content: [
                    { type: 'text', text: 'Look at this:' },
                    {
                        type: 'image',
                        source: {
                            type: 'base64',
                            media_type: 'image/png',
                            data: 'base64data'
                        }
                    }
                ]
            }];

            const result = convertToVsCodeLmMessages(messages);

            expect(result).toHaveLength(1);
            const imagePlaceholder = result[0].content[1] as MockLanguageModelTextPart;
            expect(imagePlaceholder.value).toContain('[Image (base64): image/png not supported by VSCode LM API]');
        });
    });

    describe('convertToAnthropicRole', () => {
        it('should convert assistant role correctly', () => {
            const result = convertToAnthropicRole('assistant' as any);
            expect(result).toBe('assistant');
        });

        it('should convert user role correctly', () => {
            const result = convertToAnthropicRole('user' as any);
            expect(result).toBe('user');
        });

        it('should return null for unknown roles', () => {
            const result = convertToAnthropicRole('unknown' as any);
            expect(result).toBeNull();
        });
    });

    describe('convertToAnthropicMessage', () => {
        it('should convert assistant message with text content', async () => {
            const vsCodeMessage = {
                role: 'assistant',
                name: 'assistant',
                content: [new vscode.LanguageModelTextPart('Hello')]
            };

            const result = await convertToAnthropicMessage(vsCodeMessage as any);

            expect(result.role).toBe('assistant');
            expect(result.content).toHaveLength(1);
            expect(result.content[0]).toEqual({
                type: 'text',
                text: 'Hello'
            });
            expect(result.id).toBe('test-uuid');
        });

        it('should convert assistant message with tool calls', async () => {
            const vsCodeMessage = {
                role: 'assistant',
                name: 'assistant',
                content: [new vscode.LanguageModelToolCallPart(
                    'call-1',
                    'calculator',
                    { operation: 'add', numbers: [2, 2] }
                )]
            };

            const result = await convertToAnthropicMessage(vsCodeMessage as any);

            expect(result.content).toHaveLength(1);
            expect(result.content[0]).toEqual({
                type: 'tool_use',
                id: 'call-1',
                name: 'calculator',
                input: { operation: 'add', numbers: [2, 2] }
            });
            expect(result.id).toBe('test-uuid');
        });

        it('should throw error for non-assistant messages', async () => {
            const vsCodeMessage = {
                role: 'user',
                name: 'user',
                content: [new vscode.LanguageModelTextPart('Hello')]
            };

            await expect(convertToAnthropicMessage(vsCodeMessage as any))
                .rejects
                .toThrow('Cline <Language Model API>: Only assistant messages are supported.');
        });
    });
});
209 src/api/transform/vscode-lm-format.ts Normal file
@@ -0,0 +1,209 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';

/**
 * Safely converts a value into a plain object.
 */
function asObjectSafe(value: any): object {
    // Handle null/undefined
    if (!value) {
        return {};
    }

    try {
        // Handle strings that might be JSON
        if (typeof value === 'string') {
            return JSON.parse(value);
        }

        // Handle pre-existing objects
        if (typeof value === 'object') {
            return Object.assign({}, value);
        }

        return {};
    }
    catch (error) {
        console.warn('Cline <Language Model API>: Failed to parse object:', error);
        return {};
    }
}

export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
    const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];

    for (const anthropicMessage of anthropicMessages) {
        // Handle simple string messages
        if (typeof anthropicMessage.content === "string") {
            vsCodeLmMessages.push(
                anthropicMessage.role === "assistant"
                    ? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
                    : vscode.LanguageModelChatMessage.User(anthropicMessage.content)
            );
            continue;
        }

        // Handle complex message structures
        switch (anthropicMessage.role) {
            case "user": {
                const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
                    nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
                    toolMessages: Anthropic.ToolResultBlockParam[];
                }>(
                    (acc, part) => {
                        if (part.type === "tool_result") {
                            acc.toolMessages.push(part);
                        }
                        else if (part.type === "text" || part.type === "image") {
                            acc.nonToolMessages.push(part);
                        }
                        return acc;
                    },
                    { nonToolMessages: [], toolMessages: [] },
                );

                // Process tool messages first, then non-tool messages
                const contentParts = [
                    // Convert tool messages to ToolResultParts
                    ...toolMessages.map((toolMessage) => {
                        // Process tool result content into TextParts
                        const toolContentParts: vscode.LanguageModelTextPart[] = (
                            typeof toolMessage.content === "string"
                                ? [new vscode.LanguageModelTextPart(toolMessage.content)]
                                : (
                                    toolMessage.content?.map((part) => {
                                        if (part.type === "image") {
                                            return new vscode.LanguageModelTextPart(
                                                `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
                                            );
                                        }
                                        return new vscode.LanguageModelTextPart(part.text);
                                    })
                                    ?? [new vscode.LanguageModelTextPart("")]
                                )
                        );

                        return new vscode.LanguageModelToolResultPart(
                            toolMessage.tool_use_id,
                            toolContentParts
                        );
                    }),

                    // Convert non-tool messages to TextParts after tool messages
                    ...nonToolMessages.map((part) => {
                        if (part.type === "image") {
                            return new vscode.LanguageModelTextPart(
                                `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
                            );
                        }
                        return new vscode.LanguageModelTextPart(part.text);
                    })
                ];

                // Add single user message with all content parts
                vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
                break;
            }

            case "assistant": {
                const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
                    nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
                    toolMessages: Anthropic.ToolUseBlockParam[];
                }>(
                    (acc, part) => {
                        if (part.type === "tool_use") {
                            acc.toolMessages.push(part);
                        }
                        else if (part.type === "text" || part.type === "image") {
                            acc.nonToolMessages.push(part);
                        }
                        return acc;
                    },
                    { nonToolMessages: [], toolMessages: [] },
                );

                // Process tool messages first, then non-tool messages
                const contentParts = [
                    // Convert tool messages to ToolCallParts first
                    ...toolMessages.map((toolMessage) =>
                        new vscode.LanguageModelToolCallPart(
                            toolMessage.id,
                            toolMessage.name,
                            asObjectSafe(toolMessage.input)
                        )
                    ),

                    // Convert non-tool messages to TextParts after tool messages
                    ...nonToolMessages.map((part) => {
                        if (part.type === "image") {
                            return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
                        }
                        return new vscode.LanguageModelTextPart(part.text);
                    })
                ];

                // Add the assistant message to the list of messages
                vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
                break;
            }
        }
    }

    return vsCodeLmMessages;
}

export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
    switch (vsCodeLmMessageRole) {
        case vscode.LanguageModelChatMessageRole.Assistant:
            return "assistant";
        case vscode.LanguageModelChatMessageRole.User:
            return "user";
        default:
            return null;
    }
}

export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
    const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
    if (anthropicRole !== "assistant") {
        throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
    }

    return {
        id: crypto.randomUUID(),
        type: "message",
        model: "vscode-lm",
        role: anthropicRole,
        content: (
            vsCodeLmMessage.content
                .map((part): Anthropic.ContentBlock | null => {
                    if (part instanceof vscode.LanguageModelTextPart) {
                        return {
                            type: "text",
                            text: part.value
                        };
                    }

                    if (part instanceof vscode.LanguageModelToolCallPart) {
                        return {
                            type: "tool_use",
                            id: part.callId || crypto.randomUUID(),
                            name: part.name,
                            input: asObjectSafe(part.input)
                        };
                    }

                    return null;
                })
                .filter(
                    (part): part is Anthropic.ContentBlock => part !== null
                )
        ),
        stop_reason: null,
        stop_sequence: null,
        usage: {
            input_tokens: 0,
            output_tokens: 0,
        }
    };
}
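A minimal usage sketch of the converter above; the inputs are illustrative, and the expected ordering follows from the "tool messages first" comments in the code:

import { convertToVsCodeLmMessages } from './vscode-lm-format';

const lmMessages = convertToVsCodeLmMessages([
    { role: 'user', content: 'Hello' },
    {
        role: 'assistant',
        content: [
            { type: 'text', text: 'Let me check.' },
            { type: 'tool_use', id: 'call-1', name: 'calculator', input: { operation: 'add', numbers: [2, 2] } }
        ]
    }
]);
// lmMessages[0]: a User message with a single text part.
// lmMessages[1]: an Assistant message whose tool_use block became a
// LanguageModelToolCallPart, followed by the text as a LanguageModelTextPart.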