Mirror of https://github.com/pacnpal/Roo-Code.git (synced 2025-12-20 04:11:10 -05:00)
feat(vscode-lm): implement VS Code Language Models provider
docs/vscode_lm_api_docs.md: new file, 1319 lines (diff not shown; too large to render)
package.json (19 changed lines)

@@ -42,7 +42,10 @@
 		"ai",
 		"llama"
 	],
-	"activationEvents": [],
+	"activationEvents": [
+		"onLanguage",
+		"onStartupFinished"
+	],
 	"main": "./dist/extension.js",
 	"contributes": {
 		"viewsContainers": {
@@ -141,6 +144,20 @@
 					"git show"
 				],
 				"description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
+			},
+			"roo-cline.vsCodeLmModelSelector": {
+				"type": "object",
+				"properties": {
+					"vendor": {
+						"type": "string",
+						"description": "The vendor of the language model (e.g. copilot)"
+					},
+					"family": {
+						"type": "string",
+						"description": "The family of the language model (e.g. gpt-4)"
+					}
+				},
+				"description": "Settings for VSCode Language Model API"
+			}
 		}
 	}
 }
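With this schema in place, a user can pin the provider to a specific model from settings.json. The values below are illustrative and depend on which chat-model providers are installed (here: GitHub Copilot):

	{
		"roo-cline.vsCodeLmModelSelector": {
			"vendor": "copilot",
			"family": "gpt-4o"
		}
	}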
src/api/index.ts

@@ -10,41 +10,45 @@ import { LmStudioHandler } from "./providers/lmstudio"
 import { GeminiHandler } from "./providers/gemini"
 import { OpenAiNativeHandler } from "./providers/openai-native"
 import { DeepSeekHandler } from "./providers/deepseek"
+import { VsCodeLmHandler } from "./providers/vscode-lm"
 import { ApiStream } from "./transform/stream"

 export interface SingleCompletionHandler {
 	completePrompt(prompt: string): Promise<string>
 }

 export interface ApiHandler {
 	createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
 	getModel(): { id: string; info: ModelInfo }
 }

 export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
 	const { apiProvider, ...options } = configuration
+
 	switch (apiProvider) {
 		case "anthropic":
 			return new AnthropicHandler(options)
 		case "openrouter":
 			return new OpenRouterHandler(options)
 		case "bedrock":
 			return new AwsBedrockHandler(options)
 		case "vertex":
 			return new VertexHandler(options)
 		case "openai":
 			return new OpenAiHandler(options)
 		case "ollama":
 			return new OllamaHandler(options)
 		case "lmstudio":
 			return new LmStudioHandler(options)
 		case "gemini":
 			return new GeminiHandler(options)
 		case "openai-native":
 			return new OpenAiNativeHandler(options)
 		case "deepseek":
 			return new DeepSeekHandler(options)
+		case "vscode-lm":
+			return new VsCodeLmHandler(options)
 		default:
 			return new AnthropicHandler(options)
 	}
 }
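A quick sketch of how the new case is reached (the configuration shape comes from src/shared/api.ts; the selector values and import path are illustrative):

	import { buildApiHandler } from "./api"

	// Any configuration whose apiProvider is "vscode-lm" now routes to VsCodeLmHandler.
	const handler = buildApiHandler({
		apiProvider: "vscode-lm",
		vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4o" },
	})
	console.log(handler.getModel().id)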
src/api/providers/vscode-lm.ts: new file, 569 lines

import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';

import { ApiHandler, SingleCompletionHandler } from "../";
import { calculateApiCost } from "../../utils/cost";
import { ApiStream } from "../transform/stream";
import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format";
import { SELECTOR_SEPARATOR, stringifyVsCodeLmModelSelector } from "../../shared/vsCodeSelectorUtils";
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api";
/**
 * Handles interaction with VS Code's Language Model API for chat-based operations.
 * This handler implements the ApiHandler interface to provide VS Code LM specific functionality.
 *
 * @implements {ApiHandler}
 *
 * @remarks
 * The handler manages a VS Code language model chat client and provides methods to:
 * - Create and manage chat client instances
 * - Stream messages using VS Code's Language Model API
 * - Retrieve model information
 *
 * @example
 * ```typescript
 * const options = {
 *   vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" }
 * };
 * const handler = new VsCodeLmHandler(options);
 *
 * // Stream a conversation
 * const systemPrompt = "You are a helpful assistant";
 * const messages = [{ role: "user", content: "Hello!" }];
 * for await (const chunk of handler.createMessage(systemPrompt, messages)) {
 *   console.log(chunk);
 * }
 * ```
 */
export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {

	private options: ApiHandlerOptions;
	private client: vscode.LanguageModelChat | null;
	private disposable: vscode.Disposable | null;
	private currentRequestCancellation: vscode.CancellationTokenSource | null;

	constructor(options: ApiHandlerOptions) {
		this.options = options;
		this.client = null;
		this.disposable = null;
		this.currentRequestCancellation = null;

		try {
			// Listen for model changes and reset client
			this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
				if (event.affectsConfiguration('lm')) {
					try {
						this.client = null;
						this.ensureCleanState();
					}
					catch (error) {
						console.error('Error during configuration change cleanup:', error);
					}
				}
			});
		}
		catch (error) {
			// Ensure cleanup if constructor fails
			this.dispose();
			throw new Error(
				`Cline <Language Model API>: Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
			);
		}
	}
	/**
	 * Creates a language model chat client based on the provided selector.
	 *
	 * @param selector - Selector criteria to filter language model chat instances
	 * @returns Promise resolving to the first matching language model chat instance
	 * @throws Error when no matching models are found with the given selector
	 *
	 * @example
	 * const selector = { vendor: "copilot", family: "gpt-4o" };
	 * const chatClient = await createClient(selector);
	 */
	async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
		try {
			const models = await vscode.lm.selectChatModels(selector);

			// Use first available model or create a minimal model object
			if (models && Array.isArray(models) && models.length > 0) {
				return models[0];
			}

			// Create a minimal model if no models are available
			return {
				id: 'default-lm',
				name: 'Default Language Model',
				vendor: 'vscode',
				family: 'lm',
				version: '1.0',
				maxInputTokens: 8192,
				sendRequest: async (messages, options, token) => {
					// Provide a minimal implementation
					return {
						stream: (async function* () {
							yield new vscode.LanguageModelTextPart(
								"Language model functionality is limited. Please check VS Code configuration."
							);
						})(),
						text: (async function* () {
							yield "Language model functionality is limited. Please check VS Code configuration.";
						})()
					};
				},
				countTokens: async () => 0
			};
		} catch (error) {
			const errorMessage = error instanceof Error ? error.message : 'Unknown error';
			throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
		}
	}
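For context, vscode.lm.selectChatModels is the stock VS Code API this wraps; a minimal standalone sketch (model availability depends on the user's installed providers):

	import * as vscode from 'vscode';

	async function listChatModels(): Promise<void> {
		// An empty selector returns every chat model currently available.
		const all = await vscode.lm.selectChatModels({});
		console.log(all.map(m => `${m.vendor}/${m.family} (${m.id})`));

		// A narrower selector filters by vendor/family.
		const [model] = await vscode.lm.selectChatModels({ vendor: 'copilot', family: 'gpt-4o' });
		if (!model) {
			console.warn('No matching chat model installed.');
		}
	}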
	dispose(): void {
		if (this.disposable) {
			this.disposable.dispose();
		}

		if (this.currentRequestCancellation) {
			this.currentRequestCancellation.cancel();
			this.currentRequestCancellation.dispose();
		}
	}
	private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
		// Check for required dependencies
		if (!this.client) {
			console.warn('Cline <Language Model API>: No client available for token counting');
			return 0;
		}

		if (!this.currentRequestCancellation) {
			console.warn('Cline <Language Model API>: No cancellation token available for token counting');
			return 0;
		}

		// Validate input
		if (!text) {
			console.debug('Cline <Language Model API>: Empty text provided for token counting');
			return 0;
		}

		try {
			// Handle different input types
			let tokenCount: number;

			if (typeof text === 'string') {
				tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
			} else if (text instanceof vscode.LanguageModelChatMessage) {
				// For chat messages, ensure we have content
				if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
					console.debug('Cline <Language Model API>: Empty chat message content');
					return 0;
				}
				tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
			} else {
				console.warn('Cline <Language Model API>: Invalid input type for token counting');
				return 0;
			}

			// Validate the result
			if (typeof tokenCount !== 'number') {
				console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
				return 0;
			}

			if (tokenCount < 0) {
				console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
				return 0;
			}

			return tokenCount;
		}
		catch (error) {
			// Handle specific error types
			if (error instanceof vscode.CancellationError) {
				console.debug('Cline <Language Model API>: Token counting cancelled by user');
				return 0;
			}

			const errorMessage = error instanceof Error ? error.message : 'Unknown error';
			console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);

			// Log additional error details if available
			if (error instanceof Error && error.stack) {
				console.debug('Token counting error stack:', error.stack);
			}

			return 0; // Fallback to prevent stream interruption
		}
	}
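Stripped of the guards, the underlying call is the LanguageModelChat.countTokens API; in isolation it is just (sketch, where model is any client returned by selectChatModels):

	const cancellation = new vscode.CancellationTokenSource();
	const tokens = await model.countTokens('Hello, world!', cancellation.token);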
	private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
		const systemTokens: number = await this.countTokens(systemPrompt);

		const messageTokens: number[] = await Promise.all(
			vsCodeLmMessages.map(msg => this.countTokens(msg))
		);

		return systemTokens + messageTokens.reduce(
			(sum: number, tokens: number): number => sum + tokens, 0
		);
	}
	private ensureCleanState(): void {
		if (this.currentRequestCancellation) {
			this.currentRequestCancellation.cancel();
			this.currentRequestCancellation.dispose();
			this.currentRequestCancellation = null;
		}
	}
	private async getClient(): Promise<vscode.LanguageModelChat> {
		if (!this.client) {
			console.debug('Cline <Language Model API>: Getting client with options:', {
				vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
				hasOptions: !!this.options,
				selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
			});

			try {
				// Use default empty selector if none provided to get all available models
				const selector = this.options?.vsCodeLmModelSelector || {};
				console.debug('Cline <Language Model API>: Creating client with selector:', selector);
				this.client = await this.createClient(selector);
			} catch (error) {
				const message = error instanceof Error ? error.message : 'Unknown error';
				console.error('Cline <Language Model API>: Client creation failed:', message);
				throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
			}
		}

		return this.client;
	}
	private cleanTerminalOutput(text: string): string {
		if (!text) {
			return '';
		}

		return text
			// Normalize line endings
			.replace(/\r\n/g, '\n')
			.replace(/\r/g, '\n')

			// Remove ANSI escape sequences
			.replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
			.replace(/\x9B[0-?]*[ -/]*[@-~]/g, '') // CSI sequences

			// Remove terminal title-setting sequences and other OSC sequences
			.replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')

			// Remove control characters
			.replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')

			// Remove VS Code escape sequences
			.replace(/\x1B[PD].*?\x1B\\/g, '') // DCS sequences
			.replace(/\x1B_.*?\x1B\\/g, '') // APC sequences
			.replace(/\x1B\^.*?\x1B\\/g, '') // PM sequences
			.replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen

			// Remove Windows paths and shell housekeeping output
			.replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
			.replace(/^;?Cwd=.*$/mg, '')

			// Strip escaped byte and unicode sequences
			.replace(/\\x[0-9a-fA-F]{2}/g, '')
			.replace(/\\u[0-9a-fA-F]{4}/g, '')

			// Final cleanup: collapse runs of blank lines
			.replace(/\n{3,}/g, '\n\n')
			.trim();
	}
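A small illustration of what the cleaner strips (sketch; the method is private, so assume it is exercised through the class or a test):

	const raw = '\x1B[32mPASS\x1B[0m\r\nC:\\Users\\me\\project\n\n\n\n\ndone';
	// cleanTerminalOutput(raw) === 'PASS\n\ndone'
	// Colour codes are removed, the bare Windows-path line is dropped,
	// \r\n is normalized to \n, and the run of blank lines collapses to one.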
	private cleanMessageContent(content: any): any {
		if (!content) {
			return content;
		}

		if (typeof content === 'string') {
			return this.cleanTerminalOutput(content);
		}

		if (Array.isArray(content)) {
			return content.map(item => this.cleanMessageContent(item));
		}

		if (typeof content === 'object') {
			const cleaned: any = {};
			for (const [key, value] of Object.entries(content)) {
				cleaned[key] = this.cleanMessageContent(value);
			}
			return cleaned;
		}

		return content;
	}
	/**
	 * Creates and streams a message using the VS Code Language Model API.
	 *
	 * @param systemPrompt - The system prompt to initialize the conversation context
	 * @param messages - An array of message parameters following the Anthropic message format
	 *
	 * @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
	 *
	 * @throws {Error} When the vsCodeLmModelSelector option is not provided
	 * @throws {Error} When the response stream encounters an error
	 *
	 * @remarks
	 * This method handles the initialization of the VS Code LM client if not already created,
	 * converts the messages to VS Code LM format, and streams the response chunks.
	 * Tool call handling is currently a work in progress.
	 */
	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
		// Ensure clean state before starting a new request
		this.ensureCleanState();
		const client: vscode.LanguageModelChat = await this.getClient();

		// Clean system prompt and messages
		const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
		const cleanedMessages = messages.map(msg => ({
			...msg,
			content: this.cleanMessageContent(msg.content)
		}));

		// Convert Anthropic messages to VS Code LM messages
		const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
			vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
			...convertToVsCodeLmMessages(cleanedMessages),
		];

		// Initialize cancellation token for the request
		this.currentRequestCancellation = new vscode.CancellationTokenSource();

		// Calculate input tokens before starting the stream
		const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);

		// Accumulate the text and count at the end of the stream to reduce token counting overhead.
		let accumulatedText: string = '';

		try {
			// Create the response stream with minimal required options
			const requestOptions: vscode.LanguageModelChatRequestOptions = {
				justification: `Cline would like to use '${client.name}' from '${client.vendor}'. Click 'Allow' to proceed.`
			};

			// Note: Tool support is currently provided by the VS Code Language Model API directly.
			// Extensions can register tools using vscode.lm.registerTool().

			const response: vscode.LanguageModelChatResponse = await client.sendRequest(
				vsCodeLmMessages,
				requestOptions,
				this.currentRequestCancellation.token
			);

			// Consume the stream and handle both text and tool call chunks
			for await (const chunk of response.stream) {
				if (chunk instanceof vscode.LanguageModelTextPart) {
					// Validate text part value
					if (typeof chunk.value !== 'string') {
						console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
						continue;
					}

					accumulatedText += chunk.value;
					yield {
						type: "text",
						text: chunk.value,
					};
				} else if (chunk instanceof vscode.LanguageModelToolCallPart) {
					try {
						// Validate tool call parameters
						if (!chunk.name || typeof chunk.name !== 'string') {
							console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
							continue;
						}

						if (!chunk.callId || typeof chunk.callId !== 'string') {
							console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
							continue;
						}

						// Ensure input is a valid object
						if (!chunk.input || typeof chunk.input !== 'object') {
							console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
							continue;
						}

						// Convert tool calls to text format with proper error handling
						const toolCall = {
							type: "tool_call",
							name: chunk.name,
							arguments: chunk.input,
							callId: chunk.callId
						};

						const toolCallText = JSON.stringify(toolCall);
						accumulatedText += toolCallText;

						// Log tool call for debugging
						console.debug('Cline <Language Model API>: Processing tool call:', {
							name: chunk.name,
							callId: chunk.callId,
							inputSize: JSON.stringify(chunk.input).length
						});

						yield {
							type: "text",
							text: toolCallText,
						};
					} catch (error) {
						console.error('Cline <Language Model API>: Failed to process tool call:', error);
						// Continue processing other chunks even if one fails
						continue;
					}
				} else {
					console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
				}
			}

			// Count tokens in the accumulated text after stream completion
			const totalOutputTokens: number = await this.countTokens(accumulatedText);

			// Report final usage after stream completion
			yield {
				type: "usage",
				inputTokens: totalInputTokens,
				outputTokens: totalOutputTokens,
				totalCost: calculateApiCost(
					this.getModel().info,
					totalInputTokens,
					totalOutputTokens
				)
			};
		}
		catch (error: unknown) {
			this.ensureCleanState();

			if (error instanceof vscode.CancellationError) {
				throw new Error("Cline <Language Model API>: Request cancelled by user");
			}

			if (error instanceof Error) {
				console.error('Cline <Language Model API>: Stream error details:', {
					message: error.message,
					stack: error.stack,
					name: error.name
				});

				// Rethrow the original error if it's already an Error instance
				throw error;
			} else if (typeof error === 'object' && error !== null) {
				// Handle error-like objects
				const errorDetails = JSON.stringify(error, null, 2);
				console.error('Cline <Language Model API>: Stream error object:', errorDetails);
				throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
			} else {
				// Fallback for unknown error types
				const errorMessage = String(error);
				console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
				throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
			}
		}
	}
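The generator interleaves "text" chunks with one trailing "usage" chunk; a consumer sketch (handler being any VsCodeLmHandler instance):

	let output = '';
	for await (const chunk of handler.createMessage(systemPrompt, messages)) {
		if (chunk.type === 'text') {
			output += chunk.text; // model text, including serialized tool calls
		} else if (chunk.type === 'usage') {
			console.log(chunk.inputTokens, chunk.outputTokens, chunk.totalCost);
		}
	}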
	// Return model information based on the current client state
	getModel(): { id: string; info: ModelInfo; } {
		if (this.client) {
			// Validate client properties
			const requiredProps = {
				id: this.client.id,
				vendor: this.client.vendor,
				family: this.client.family,
				version: this.client.version,
				maxInputTokens: this.client.maxInputTokens
			};

			// Log any missing properties for debugging
			for (const [prop, value] of Object.entries(requiredProps)) {
				if (!value && value !== 0) {
					console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
				}
			}

			// Construct model ID using available information
			const modelParts = [
				this.client.vendor,
				this.client.family,
				this.client.version
			].filter(Boolean);

			const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);

			// Build model info with conservative defaults for missing values
			const modelInfo: ModelInfo = {
				maxTokens: -1, // Unlimited tokens by default
				contextWindow: typeof this.client.maxInputTokens === 'number'
					? Math.max(0, this.client.maxInputTokens)
					: openAiModelInfoSaneDefaults.contextWindow,
				supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
				supportsPromptCache: true,
				inputPrice: 0,
				outputPrice: 0,
				description: `VSCode Language Model: ${modelId}`
			};

			return { id: modelId, info: modelInfo };
		}

		// Fallback when no client is available
		const fallbackId = this.options.vsCodeLmModelSelector
			? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
			: "vscode-lm";

		console.debug('Cline <Language Model API>: No client available, using fallback model info');

		return {
			id: fallbackId,
			info: {
				...openAiModelInfoSaneDefaults,
				description: `VSCode Language Model (Fallback): ${fallbackId}`
			}
		};
	}
	async completePrompt(prompt: string): Promise<string> {
		try {
			const client = await this.getClient();
			const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
			let result = "";
			for await (const chunk of response.stream) {
				if (chunk instanceof vscode.LanguageModelTextPart) {
					result += chunk.value;
				}
			}
			return result;
		} catch (error) {
			if (error instanceof Error) {
				throw new Error(`VSCode LM completion error: ${error.message}`)
			}
			throw error
		}
	}
}
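completePrompt gives callers a one-shot, non-streaming path over the same client; e.g. (illustrative):

	const summary = await handler.completePrompt('Summarize this diff in one line.');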
src/api/transform/vscode-lm-format.ts: new file, 209 lines

import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';

/**
 * Safely converts a value into a plain object.
 */
function asObjectSafe(value: any): object {
	// Handle null/undefined
	if (!value) {
		return {};
	}

	try {
		// Handle strings that might be JSON
		if (typeof value === 'string') {
			return JSON.parse(value);
		}

		// Handle pre-existing objects
		if (typeof value === 'object') {
			return Object.assign({}, value);
		}

		return {};
	}
	catch (error) {
		console.warn('Cline <Language Model API>: Failed to parse object:', error);
		return {};
	}
}

export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
	const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];

	for (const anthropicMessage of anthropicMessages) {
		// Handle simple string messages
		if (typeof anthropicMessage.content === "string") {
			vsCodeLmMessages.push(
				anthropicMessage.role === "assistant"
					? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
					: vscode.LanguageModelChatMessage.User(anthropicMessage.content)
			);
			continue;
		}

		// Handle complex message structures
		switch (anthropicMessage.role) {
			case "user": {
				const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
					nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
					toolMessages: Anthropic.ToolResultBlockParam[];
				}>(
					(acc, part) => {
						if (part.type === "tool_result") {
							acc.toolMessages.push(part);
						}
						else if (part.type === "text" || part.type === "image") {
							acc.nonToolMessages.push(part);
						}
						return acc;
					},
					{ nonToolMessages: [], toolMessages: [] },
				);

				// Process tool messages first, then non-tool messages
				const contentParts = [
					// Convert tool messages to ToolResultParts
					...toolMessages.map((toolMessage) => {
						// Process tool result content into TextParts
						const toolContentParts: vscode.LanguageModelTextPart[] = (
							typeof toolMessage.content === "string"
								? [new vscode.LanguageModelTextPart(toolMessage.content)]
								: (
									toolMessage.content?.map((part) => {
										if (part.type === "image") {
											return new vscode.LanguageModelTextPart(
												`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
											);
										}
										return new vscode.LanguageModelTextPart(part.text);
									})
									?? [new vscode.LanguageModelTextPart("")]
								)
						);

						return new vscode.LanguageModelToolResultPart(
							toolMessage.tool_use_id,
							toolContentParts
						);
					}),

					// Convert non-tool messages to TextParts after tool messages
					...nonToolMessages.map((part) => {
						if (part.type === "image") {
							return new vscode.LanguageModelTextPart(
								`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
							);
						}
						return new vscode.LanguageModelTextPart(part.text);
					})
				];

				// Add a single user message with all content parts
				vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
				break;
			}

			case "assistant": {
				const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
					nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
					toolMessages: Anthropic.ToolUseBlockParam[];
				}>(
					(acc, part) => {
						if (part.type === "tool_use") {
							acc.toolMessages.push(part);
						}
						else if (part.type === "text" || part.type === "image") {
							acc.nonToolMessages.push(part);
						}
						return acc;
					},
					{ nonToolMessages: [], toolMessages: [] },
				);

				// Process tool messages first, then non-tool messages
				const contentParts = [
					// Convert tool messages to ToolCallParts first
					...toolMessages.map((toolMessage) =>
						new vscode.LanguageModelToolCallPart(
							toolMessage.id,
							toolMessage.name,
							asObjectSafe(toolMessage.input)
						)
					),

					// Convert non-tool messages to TextParts after tool messages
					...nonToolMessages.map((part) => {
						if (part.type === "image") {
							return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
						}
						return new vscode.LanguageModelTextPart(part.text);
					})
				];

				// Add the assistant message to the list of messages
				vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
				break;
			}
		}
	}

	return vsCodeLmMessages;
}
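A sketch of the shape this conversion produces (inputs illustrative; the part classes are the stock VS Code API types):

	const lmMessages = convertToVsCodeLmMessages([
		{ role: 'user', content: 'List the files.' },
		{ role: 'assistant', content: [{ type: 'tool_use', id: 'call-1', name: 'list_files', input: { path: '.' } }] },
		{ role: 'user', content: [{ type: 'tool_result', tool_use_id: 'call-1', content: 'src/ package.json' }] },
	]);
	// → [User(TextPart), Assistant(ToolCallPart), User(ToolResultPart)]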
export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
	switch (vsCodeLmMessageRole) {
		case vscode.LanguageModelChatMessageRole.Assistant:
			return "assistant";
		case vscode.LanguageModelChatMessageRole.User:
			return "user";
		default:
			return null;
	}
}

export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
	const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
	if (anthropicRole !== "assistant") {
		throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
	}

	return {
		id: crypto.randomUUID(),
		type: "message",
		model: "vscode-lm",
		role: anthropicRole,
		content: (
			vsCodeLmMessage.content
				.map((part): Anthropic.ContentBlock | null => {
					if (part instanceof vscode.LanguageModelTextPart) {
						return {
							type: "text",
							text: part.value
						};
					}

					if (part instanceof vscode.LanguageModelToolCallPart) {
						return {
							type: "tool_use",
							id: part.callId || crypto.randomUUID(),
							name: part.name,
							input: asObjectSafe(part.input)
						};
					}

					return null;
				})
				.filter(
					(part): part is Anthropic.ContentBlock => part !== null
				)
		),
		stop_reason: null,
		stop_sequence: null,
		usage: {
			input_tokens: 0,
			output_tokens: 0,
		}
	};
}
src/core/webview/ClineProvider.ts

@@ -41,6 +41,7 @@ type SecretKey =
 	| "geminiApiKey"
 	| "openAiNativeApiKey"
 	| "deepSeekApiKey"
+
 type GlobalStateKey =
 	| "apiProvider"
 	| "apiModelId"
@@ -79,6 +80,8 @@ type GlobalStateKey =
 	| "writeDelayMs"
 	| "terminalOutputLineLimit"
 	| "mcpEnabled"
+	| "vsCodeLmModelSelector"
+
 export const GlobalFileNames = {
 	apiConversationHistory: "api_conversation_history.json",
 	uiMessages: "ui_messages.json",
@@ -228,7 +231,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			diffEnabled,
 			fuzzyMatchThreshold
 		} = await this.getState()

 		this.cline = new Cline(
 			this,
 			apiConfiguration,
@@ -248,7 +251,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			diffEnabled,
 			fuzzyMatchThreshold
 		} = await this.getState()

 		this.cline = new Cline(
 			this,
 			apiConfiguration,
@@ -314,15 +317,15 @@ export class ClineProvider implements vscode.WebviewViewProvider {

 		// Use a nonce to only allow a specific script to be run.
 		/*
 		content security policy of your webview to only allow scripts that have a specific nonce
 		create a content security policy meta tag so that only loading scripts with a nonce is allowed
 		As your extension grows you will likely want to add custom styles, fonts, and/or images to your webview. If you do, you will need to update the content security policy meta tag to explicitly allow for these resources. E.g.
 		<meta http-equiv="Content-Security-Policy" content="default-src 'none'; style-src ${webview.cspSource}; font-src ${webview.cspSource}; img-src ${webview.cspSource} https:; script-src 'nonce-${nonce}';">
 		- 'unsafe-inline' is required for styles due to vscode-webview-toolkit's dynamic style injection
 		- since we pass base64 images to the webview, we need to specify img-src ${webview.cspSource} data:;

 		in meta tag we add nonce attribute: A cryptographic nonce (only used once) to allow scripts. The server must generate a unique nonce value each time it transmits a policy. It is critical to provide a nonce that cannot be guessed, as bypassing a resource's policy is otherwise trivial.
 		*/
 		const nonce = getNonce()

 		// Tip: Install the es6-string-html VS Code extension to enable code highlighting below
@@ -426,6 +429,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 					openRouterModelId,
 					openRouterModelInfo,
 					openRouterUseMiddleOutTransform,
+					vsCodeLmModelSelector,
 				} = message.apiConfiguration
 				await this.updateGlobalState("apiProvider", apiProvider)
 				await this.updateGlobalState("apiModelId", apiModelId)
@@ -454,6 +458,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 				await this.updateGlobalState("openRouterModelId", openRouterModelId)
 				await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
 				await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
+				await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
 				if (this.cline) {
 					this.cline.api = buildApiHandler(message.apiConfiguration)
 				}
@@ -525,6 +530,10 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 				const lmStudioModels = await this.getLmStudioModels(message.text)
 				this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
 				break
+			case "requestVsCodeLmModels":
+				const vsCodeLmModels = await this.getVsCodeLmModels()
+				this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
+				break
 			case "refreshOpenRouterModels":
 				await this.refreshOpenRouterModels()
 				break
@@ -532,7 +541,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 				if (message?.values?.baseUrl && message?.values?.apiKey) {
 					const openAiModels = await this.getOpenAiModels(message?.values?.baseUrl, message?.values?.apiKey)
 					this.postMessageToWebview({ type: "openAiModels", openAiModels })
 				}
 				break
 			case "openImage":
 				openImage(message.text!)
@@ -664,7 +673,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 				)
 				if (answer === "Yes" && this.cline && typeof message.value === 'number' && message.value) {
 					const timeCutoff = message.value - 1000; // 1 second buffer before the message to delete
 					const messageIndex = this.cline.clineMessages.findIndex(msg => msg.ts && msg.ts >= timeCutoff)
 					const apiConversationHistoryIndex = this.cline.apiConversationHistory.findIndex(msg => msg.ts && msg.ts >= timeCutoff)
 					if (messageIndex !== -1) {
 						const { historyItem } = await this.getTaskWithId(this.cline.taskId)
@@ -773,6 +782,17 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			}
 		}
+
+	// VSCode LM API
+	private async getVsCodeLmModels() {
+		try {
+			const models = await vscode.lm.selectChatModels({});
+			return models || [];
+		} catch (error) {
+			console.error('Error fetching VS Code LM models:', error);
+			return [];
+		}
+	}

 	// OpenAi

 	async getOpenAiModels(baseUrl?: string, apiKey?: string) {
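The webview round-trip this enables, sketched from the message unions further below (webview-side code; acquireVsCodeApi is the standard webview handle):

	const vscodeApi = acquireVsCodeApi();
	vscodeApi.postMessage({ type: 'requestVsCodeLmModels' });

	window.addEventListener('message', (event) => {
		if (event.data.type === 'vsCodeLmModels') {
			console.log(event.data.vsCodeLmModels); // [{ vendor, family, version, id }, ...]
		}
	});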
@@ -1042,9 +1062,9 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 	}

 	async getStateToPostToWebview() {
 		const {
 			apiConfiguration,
 			lastShownAnnouncementId,
 			customInstructions,
 			alwaysAllowReadOnly,
 			alwaysAllowWrite,
@@ -1063,7 +1083,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			fuzzyMatchThreshold,
 			mcpEnabled,
 		} = await this.getState()
+

 		const allowedCommands = vscode.workspace
 			.getConfiguration('roo-cline')
 			.get<string[]>('allowedCommands') || []
@@ -1196,6 +1217,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			screenshotQuality,
 			terminalOutputLineLimit,
 			mcpEnabled,
+			vsCodeLmModelSelector,
 		] = await Promise.all([
 			this.getGlobalState("apiProvider") as Promise<ApiProvider | undefined>,
 			this.getGlobalState("apiModelId") as Promise<string | undefined>,
@@ -1243,6 +1265,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			this.getGlobalState("screenshotQuality") as Promise<number | undefined>,
 			this.getGlobalState("terminalOutputLineLimit") as Promise<number | undefined>,
 			this.getGlobalState("mcpEnabled") as Promise<boolean | undefined>,
+			this.getGlobalState("vsCodeLmModelSelector") as Promise<vscode.LanguageModelChatSelector | undefined>,
 		])

 		let apiProvider: ApiProvider
@@ -1288,6 +1311,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 				openRouterModelId,
 				openRouterModelInfo,
 				openRouterUseMiddleOutTransform,
+				vsCodeLmModelSelector,
 			},
 			lastShownAnnouncementId,
 			customInstructions,
src/extension.ts

@@ -36,7 +36,7 @@ export function activate(context: vscode.ExtensionContext) {
 		context.globalState.update('allowedCommands', defaultCommands);
 	}

-	const sidebarProvider = new ClineProvider(context, outputChannel)
+	const sidebarProvider = new ClineProvider(context, outputChannel);

 	context.subscriptions.push(
 		vscode.window.registerWebviewViewProvider(ClineProvider.sideBarId, sidebarProvider, {
src/shared/ExtensionMessage.ts

@@ -6,32 +6,36 @@ import { McpServer } from "./mcp"

 // webview will hold state
 export interface ExtensionMessage {
 	type:
 		| "action"
 		| "state"
 		| "selectedImages"
 		| "ollamaModels"
 		| "lmStudioModels"
+		| "vsCodeLmModels"
+		| "vsCodeLmApiAvailable"
+		| "requestVsCodeLmModels"
 		| "theme"
 		| "workspaceUpdated"
 		| "invoke"
 		| "partialMessage"
 		| "openRouterModels"
 		| "openAiModels"
 		| "mcpServers"
 		| "enhancedPrompt"
 	text?: string
 	action?:
 		| "chatButtonClicked"
 		| "mcpButtonClicked"
 		| "settingsButtonClicked"
 		| "historyButtonClicked"
 		| "didBecomeVisible"
 	invoke?: "sendMessage" | "primaryButtonClick" | "secondaryButtonClick"
 	state?: ExtensionState
 	images?: string[]
 	ollamaModels?: string[]
 	lmStudioModels?: string[]
+	vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
 	filePaths?: string[]
 	partialMessage?: ClineMessage
 	openRouterModels?: Record<string, ModelInfo>
@@ -109,14 +113,14 @@ export type ClineSay =

 export interface ClineSayTool {
 	tool:
 		| "editedExistingFile"
 		| "appliedDiff"
 		| "newFileCreated"
 		| "readFile"
 		| "listFilesTopLevel"
 		| "listFilesRecursive"
 		| "listCodeDefinitionNames"
 		| "searchFiles"
 	path?: string
 	diff?: string
 	content?: string
src/shared/WebviewMessage.ts

@@ -4,52 +4,53 @@ export type AudioType = "notification" | "celebration" | "progress_loop"

 export interface WebviewMessage {
 	type:
 		| "apiConfiguration"
 		| "customInstructions"
 		| "allowedCommands"
 		| "alwaysAllowReadOnly"
 		| "alwaysAllowWrite"
 		| "alwaysAllowExecute"
 		| "webviewDidLaunch"
 		| "newTask"
 		| "askResponse"
 		| "clearTask"
 		| "didShowAnnouncement"
 		| "selectImages"
 		| "exportCurrentTask"
 		| "showTaskWithId"
 		| "deleteTaskWithId"
 		| "exportTaskWithId"
 		| "resetState"
 		| "requestOllamaModels"
 		| "requestLmStudioModels"
+		| "requestVsCodeLmModels"
 		| "openImage"
 		| "openFile"
 		| "openMention"
 		| "cancelTask"
 		| "refreshOpenRouterModels"
 		| "refreshOpenAiModels"
 		| "alwaysAllowBrowser"
 		| "alwaysAllowMcp"
 		| "playSound"
 		| "soundEnabled"
 		| "soundVolume"
 		| "diffEnabled"
 		| "browserViewportSize"
 		| "screenshotQuality"
 		| "openMcpSettings"
 		| "restartMcpServer"
 		| "toggleToolAlwaysAllow"
 		| "toggleMcpServer"
 		| "fuzzyMatchThreshold"
 		| "preferredLanguage"
 		| "writeDelayMs"
 		| "enhancePrompt"
 		| "enhancedPrompt"
 		| "draggedImages"
 		| "deleteMessage"
 		| "terminalOutputLineLimit"
 		| "mcpEnabled"
 	text?: string
 	disabled?: boolean
 	askResponse?: ClineAskResponse
src/shared/api.ts

@@ -1,3 +1,5 @@
+import * as vscode from 'vscode';
+
 export type ApiProvider =
 	| "anthropic"
 	| "openrouter"
@@ -9,11 +11,13 @@ export type ApiProvider =
 	| "gemini"
 	| "openai-native"
 	| "deepseek"
+	| "vscode-lm"

 export interface ApiHandlerOptions {
 	apiModelId?: string
 	apiKey?: string // anthropic
 	anthropicBaseUrl?: string
+	vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
 	openRouterApiKey?: string
 	openRouterModelId?: string
 	openRouterModelInfo?: ModelInfo
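vscode.LanguageModelChatSelector is the stock VS Code type; every field is an optional filter, and an empty selector matches all installed chat models:

	const selector: vscode.LanguageModelChatSelector = {
		vendor: 'copilot',  // provider extension, e.g. GitHub Copilot
		family: 'gpt-4o',   // model family
		// version: '...',  // optional exact version
		// id: '...',       // optional exact model id
	};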
@@ -47,16 +51,17 @@ export interface ApiHandlerOptions {

 export type ApiConfiguration = ApiHandlerOptions & {
 	apiProvider?: ApiProvider
+	vsCodeLmModelSelector?: vscode.LanguageModelChatSelector;
 }

 // Models

 export interface ModelInfo {
 	maxTokens?: number
-	contextWindow?: number
+	contextWindow: number
 	supportsImages?: boolean
 	supportsComputerUse?: boolean
-	supportsPromptCache: boolean // this value is hardcoded for now
+	supportsPromptCache: boolean
 	inputPrice?: number
 	outputPrice?: number
 	cacheWritesPrice?: number
@@ -115,24 +120,24 @@ export const anthropicModels = {
 // AWS Bedrock
 // https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
 export interface MessageContent {
 	type: 'text' | 'image' | 'video' | 'tool_use' | 'tool_result';
 	text?: string;
 	source?: {
 		type: 'base64';
 		data: string | Uint8Array; // string for Anthropic, Uint8Array for Bedrock
 		media_type: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp';
 	};
 	// Video specific fields
 	format?: string;
 	s3Location?: {
 		uri: string;
 		bucketOwner?: string;
 	};
 	// Tool use and result fields
 	toolUseId?: string;
 	name?: string;
 	input?: any;
 	output?: any; // Used for tool_result type
 }

 export type BedrockModelId = keyof typeof bedrockModels
@@ -226,7 +231,7 @@ export const bedrockModels = {
|
|||||||
inputPrice: 0.25,
|
inputPrice: 0.25,
|
||||||
outputPrice: 1.25,
|
outputPrice: 1.25,
|
||||||
},
|
},
|
||||||
"meta.llama3-2-90b-instruct-v1:0" : {
|
"meta.llama3-2-90b-instruct-v1:0": {
|
||||||
maxTokens: 8192,
|
maxTokens: 8192,
|
||||||
contextWindow: 128_000,
|
contextWindow: 128_000,
|
||||||
supportsImages: true,
|
supportsImages: true,
|
||||||
@@ -235,7 +240,7 @@ export const bedrockModels = {
|
|||||||
inputPrice: 0.72,
|
inputPrice: 0.72,
|
||||||
outputPrice: 0.72,
|
outputPrice: 0.72,
|
||||||
},
|
},
|
||||||
"meta.llama3-2-11b-instruct-v1:0" : {
|
"meta.llama3-2-11b-instruct-v1:0": {
|
||||||
maxTokens: 8192,
|
maxTokens: 8192,
|
||||||
contextWindow: 128_000,
|
contextWindow: 128_000,
|
||||||
supportsImages: true,
|
supportsImages: true,
|
||||||
@@ -244,7 +249,7 @@ export const bedrockModels = {
|
|||||||
inputPrice: 0.16,
|
inputPrice: 0.16,
|
||||||
outputPrice: 0.16,
|
outputPrice: 0.16,
|
||||||
},
|
},
|
||||||
"meta.llama3-2-3b-instruct-v1:0" : {
|
"meta.llama3-2-3b-instruct-v1:0": {
|
||||||
maxTokens: 8192,
|
maxTokens: 8192,
|
||||||
contextWindow: 128_000,
|
contextWindow: 128_000,
|
||||||
supportsImages: false,
|
supportsImages: false,
|
||||||
@@ -253,7 +258,7 @@ export const bedrockModels = {
|
|||||||
inputPrice: 0.15,
|
inputPrice: 0.15,
|
||||||
outputPrice: 0.15,
|
outputPrice: 0.15,
|
||||||
},
|
},
|
||||||
"meta.llama3-2-1b-instruct-v1:0" : {
|
"meta.llama3-2-1b-instruct-v1:0": {
|
||||||
maxTokens: 8192,
|
maxTokens: 8192,
|
||||||
contextWindow: 128_000,
|
contextWindow: 128_000,
|
||||||
supportsImages: false,
|
supportsImages: false,
|
||||||
@@ -262,7 +267,7 @@ export const bedrockModels = {
|
|||||||
inputPrice: 0.1,
|
inputPrice: 0.1,
|
||||||
outputPrice: 0.1,
|
outputPrice: 0.1,
|
||||||
},
|
},
|
||||||
"meta.llama3-1-405b-instruct-v1:0" : {
|
"meta.llama3-1-405b-instruct-v1:0": {
|
||||||
maxTokens: 8192,
|
maxTokens: 8192,
|
||||||
contextWindow: 128_000,
|
contextWindow: 128_000,
|
||||||
supportsImages: false,
|
supportsImages: false,
|
||||||
@@ -271,7 +276,7 @@ export const bedrockModels = {
|
|||||||
inputPrice: 2.4,
|
inputPrice: 2.4,
|
||||||
outputPrice: 2.4,
|
outputPrice: 2.4,
|
||||||
},
|
},
|
||||||
"meta.llama3-1-70b-instruct-v1:0" : {
|
"meta.llama3-1-70b-instruct-v1:0": {
|
||||||
maxTokens: 8192,
|
maxTokens: 8192,
|
||||||
contextWindow: 128_000,
|
contextWindow: 128_000,
|
||||||
supportsImages: false,
|
supportsImages: false,
|
||||||
@@ -280,7 +285,7 @@ export const bedrockModels = {
|
|||||||
inputPrice: 0.72,
|
inputPrice: 0.72,
|
||||||
outputPrice: 0.72,
|
outputPrice: 0.72,
|
||||||
},
|
},
|
||||||
"meta.llama3-1-8b-instruct-v1:0" : {
|
"meta.llama3-1-8b-instruct-v1:0": {
|
||||||
maxTokens: 8192,
|
maxTokens: 8192,
|
||||||
contextWindow: 8_000,
|
contextWindow: 8_000,
|
||||||
supportsImages: false,
|
supportsImages: false,
|
||||||
@@ -289,8 +294,8 @@ export const bedrockModels = {
|
|||||||
inputPrice: 0.22,
|
inputPrice: 0.22,
|
||||||
outputPrice: 0.22,
|
outputPrice: 0.22,
|
||||||
},
|
},
|
||||||
"meta.llama3-70b-instruct-v1:0" : {
|
"meta.llama3-70b-instruct-v1:0": {
|
||||||
maxTokens: 2048 ,
|
maxTokens: 2048,
|
||||||
contextWindow: 8_000,
|
contextWindow: 8_000,
|
||||||
supportsImages: false,
|
supportsImages: false,
|
||||||
supportsComputerUse: false,
|
supportsComputerUse: false,
|
||||||
@@ -298,8 +303,8 @@ export const bedrockModels = {
|
|||||||
inputPrice: 2.65,
|
inputPrice: 2.65,
|
||||||
outputPrice: 3.5,
|
outputPrice: 3.5,
|
||||||
},
|
},
|
||||||
"meta.llama3-8b-instruct-v1:0" : {
|
"meta.llama3-8b-instruct-v1:0": {
|
||||||
maxTokens: 2048 ,
|
maxTokens: 2048,
|
||||||
contextWindow: 4_000,
|
contextWindow: 4_000,
|
||||||
supportsImages: false,
|
supportsImages: false,
|
||||||
supportsComputerUse: false,
|
supportsComputerUse: false,
|
||||||
@@ -514,4 +519,3 @@ export const deepSeekModels = {
|
|||||||
// https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
|
// https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
|
||||||
// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
|
// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
|
||||||
export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"
|
export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"
|
||||||
|
|
||||||
|
|||||||
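For orientation, two literal values that satisfy the MessageContent interface reproduced above; the field values are made up for illustration:

	const textBlock: MessageContent = { type: 'text', text: 'Hello from Bedrock' };

	const toolResult: MessageContent = {
		type: 'tool_result',
		toolUseId: 'tool-call-1', // pairs the result with its tool_use block
		output: { ok: true },     // output is only meaningful for tool_result
	};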
src/shared/vsCodeSelectorUtils.ts (new file, 14 lines)
@@ -0,0 +1,14 @@
+import { LanguageModelChatSelector } from 'vscode';
+
+export const SELECTOR_SEPARATOR = '/';
+
+export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
+	return [
+		selector.vendor,
+		selector.family,
+		selector.version,
+		selector.id
+	]
+		.filter(Boolean)
+		.join(SELECTOR_SEPARATOR);
+}
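Because stringifyVsCodeLmModelSelector drops unset fields with .filter(Boolean) before joining, partial selectors still produce a stable identifier. For example (selector values illustrative):

	stringifyVsCodeLmModelSelector({ vendor: 'copilot', family: 'gpt-4' }) // "copilot/gpt-4"
	stringifyVsCodeLmModelSelector({ family: 'gpt-4o' })                   // "gpt-4o"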
src/types/vscode.d.ts (new file, vendored, 86 lines)
@@ -0,0 +1,86 @@
+declare namespace vscode {
+	enum LanguageModelChatMessageRole {
+		User = 1,
+		Assistant = 2
+	}
+
+	enum LanguageModelChatToolMode {
+		Auto = 1,
+		Required = 2
+	}
+
+	interface LanguageModelChatSelector {
+		vendor?: string;
+		family?: string;
+		version?: string;
+		id?: string;
+	}
+
+	interface LanguageModelChatTool {
+		name: string;
+		description: string;
+		inputSchema?: object;
+	}
+
+	interface LanguageModelChatRequestOptions {
+		justification?: string;
+		modelOptions?: { [name: string]: any; };
+		tools?: LanguageModelChatTool[];
+		toolMode?: LanguageModelChatToolMode;
+	}
+
+	class LanguageModelTextPart {
+		value: string;
+		constructor(value: string);
+	}
+
+	class LanguageModelToolCallPart {
+		callId: string;
+		name: string;
+		input: object;
+		constructor(callId: string, name: string, input: object);
+	}
+
+	interface LanguageModelChatResponse {
+		stream: AsyncIterable<LanguageModelTextPart | LanguageModelToolCallPart | unknown>;
+		text: AsyncIterable<string>;
+	}
+
+	interface LanguageModelChat {
+		readonly name: string;
+		readonly id: string;
+		readonly vendor: string;
+		readonly family: string;
+		readonly version: string;
+		readonly maxInputTokens: number;
+
+		sendRequest(messages: LanguageModelChatMessage[], options?: LanguageModelChatRequestOptions, token?: CancellationToken): Thenable<LanguageModelChatResponse>;
+		countTokens(text: string | LanguageModelChatMessage, token?: CancellationToken): Thenable<number>;
+	}
+
+	class LanguageModelPromptTsxPart {
+		value: unknown;
+		constructor(value: unknown);
+	}
+
+	class LanguageModelToolResultPart {
+		callId: string;
+		content: Array<LanguageModelTextPart | LanguageModelPromptTsxPart | unknown>;
+		constructor(callId: string, content: Array<LanguageModelTextPart | LanguageModelPromptTsxPart | unknown>);
+	}
+
+	class LanguageModelChatMessage {
+		static User(content: string | Array<LanguageModelTextPart | LanguageModelToolResultPart>, name?: string): LanguageModelChatMessage;
+		static Assistant(content: string | Array<LanguageModelTextPart | LanguageModelToolCallPart>, name?: string): LanguageModelChatMessage;
+
+		role: LanguageModelChatMessageRole;
+		content: Array<LanguageModelTextPart | LanguageModelToolResultPart | LanguageModelToolCallPart>;
+		name: string | undefined;
+
+		constructor(role: LanguageModelChatMessageRole, content: string | Array<LanguageModelTextPart | LanguageModelToolResultPart | LanguageModelToolCallPart>, name?: string);
+	}
+
+	namespace lm {
+		function selectChatModels(selector?: LanguageModelChatSelector): Thenable<LanguageModelChat[]>;
+	}
+}
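These vendored declarations cover the subset of the VS Code Language Model API the provider relies on. A minimal sketch of how they are consumed, with error handling and cancellation elided and the selector values chosen only as an example:

	async function completeWithVsCodeLm(prompt: string): Promise<string> {
		// Pick the first model that matches the selector; an empty array means
		// no installed extension currently contributes a matching model.
		const [model] = await vscode.lm.selectChatModels({ vendor: 'copilot', family: 'gpt-4' });
		if (!model) {
			throw new Error('No VS Code language model matched the selector');
		}

		const response = await model.sendRequest([vscode.LanguageModelChatMessage.User(prompt)]);

		// response.text yields the text fragments of the streamed reply in order.
		let result = '';
		for await (const chunk of response.text) {
			result += chunk;
		}
		return result;
	}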
@@ -49,6 +49,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 	const { apiConfiguration, setApiConfiguration, uriScheme } = useExtensionState()
 	const [ollamaModels, setOllamaModels] = useState<string[]>([])
 	const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
+	const [vsCodeLmModels, setVsCodeLmModels] = useState<vscode.LanguageModelChatSelector[]>([])
 	const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
 	const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
 	const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
@@ -67,21 +68,24 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 			vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
 		} else if (selectedProvider === "lmstudio") {
 			vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
+		} else if (selectedProvider === "vscode-lm") {
+			vscode.postMessage({ type: "requestVsCodeLmModels" })
 		}
 	}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
 	useEffect(() => {
-		if (selectedProvider === "ollama" || selectedProvider === "lmstudio") {
+		if (selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm") {
 			requestLocalModels()
 		}
 	}, [selectedProvider, requestLocalModels])
-	useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null)
+	useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm" ? 2000 : null)

 	const handleMessage = useCallback((event: MessageEvent) => {
 		const message: ExtensionMessage = event.data
 		if (message.type === "ollamaModels" && message.ollamaModels) {
 			setOllamaModels(message.ollamaModels)
 		} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
 			setLmStudioModels(message.lmStudioModels)
+		} else if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
+			setVsCodeLmModels(message.vsCodeLmModels)
 		}
 	}, [])
 	useEvent("message", handleMessage)
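The hunks above cover only the webview half of the model-listing round trip. The extension-host handler is not part of this diff; a plausible counterpart, hypothetical except for the message names the webview already expects, could look like this:

	// Hypothetical extension-host handler: answer "requestVsCodeLmModels" by
	// posting the vendor/family/version/id of every available chat model back
	// to the webview as a "vsCodeLmModels" message.
	async function onRequestVsCodeLmModels(postMessage: (message: unknown) => void): Promise<void> {
		const models = await vscode.lm.selectChatModels({}); // an empty selector matches all models
		postMessage({
			type: "vsCodeLmModels",
			vsCodeLmModels: models.map(({ vendor, family, version, id }) => ({ vendor, family, version, id })),
		});
	}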
@@ -139,6 +143,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 					<VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption>
 					<VSCodeOption value="lmstudio">LM Studio</VSCodeOption>
 					<VSCodeOption value="ollama">Ollama</VSCodeOption>
+					<VSCodeOption value="vscode-lm">VS Code LM API</VSCodeOption>
 				</VSCodeDropdown>
 			</div>

@@ -261,7 +266,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 						}}>
 						Compress prompts and message chains to the context size (<a href="https://openrouter.ai/docs/transforms">OpenRouter Transforms</a>)
 					</VSCodeCheckbox>
-					<br/>
+					<br />
 				</div>
 			)}

@@ -591,6 +596,50 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 				</div>
 			)}

+			{selectedProvider === "vscode-lm" && (
+				<div>
+					<div className="dropdown-container">
+						<label htmlFor="vscode-lm-model">
+							<span style={{ fontWeight: 500 }}>Language Model</span>
+						</label>
+						{vsCodeLmModels.length > 0 ? (
+							<VSCodeDropdown
+								id="vscode-lm-model"
+								value={apiConfiguration?.vsCodeLmModelSelector ?
+									`${apiConfiguration.vsCodeLmModelSelector.vendor ?? ""}/${apiConfiguration.vsCodeLmModelSelector.family ?? ""}` :
+									""}
+								onChange={(e) => {
+									const value = (e.target as HTMLInputElement).value;
+									const [vendor, family] = value.split('/');
+									setApiConfiguration({
+										...apiConfiguration,
+										vsCodeLmModelSelector: value ? { vendor, family } : undefined
+									});
+								}}
+								style={{ width: "100%" }}>
+								<VSCodeOption value="">Select a model...</VSCodeOption>
+								{vsCodeLmModels.map((model) => (
+									<VSCodeOption
+										key={`${model.vendor}/${model.family}`}
+										value={`${model.vendor}/${model.family}`}>
+										{model.vendor} - {model.family}
+									</VSCodeOption>
+								))}
+							</VSCodeDropdown>
+						) : (
+							<p style={{
+								fontSize: "12px",
+								marginTop: "5px",
+								color: "var(--vscode-descriptionForeground)",
+							}}>
+								No language models available.<br />
+								You can use any VS Code extension that provides language model capabilities.
+							</p>
+						)}
+					</div>
+				</div>
+			)}
+
 			{selectedProvider === "ollama" && (
 				<div>
 					<VSCodeTextField
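Note that the vscode-lm dropdown above serializes the selector as a single "vendor/family" string and recovers it with split('/'), and normalizeApiConfiguration below reuses the same encoding for selectedModelId. The round trip, assuming neither component contains a '/':

	const value = `${selector.vendor ?? ''}/${selector.family ?? ''}`; // e.g. "copilot/gpt-4"
	const [vendor, family] = value.split('/');                         // ["copilot", "gpt-4"]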
@@ -896,6 +945,18 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 				selectedModelId: apiConfiguration?.lmStudioModelId || "",
 				selectedModelInfo: openAiModelInfoSaneDefaults,
 			}
+		case "vscode-lm":
+			return {
+				selectedProvider: provider,
+				selectedModelId: apiConfiguration?.vsCodeLmModelSelector ?
+					`${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}` :
+					"",
+				selectedModelInfo: {
+					...openAiModelInfoSaneDefaults,
+					supportsImages: false, // VSCode LM API currently doesn't support images
+					supportsComputerUse: true // All VSCode LM models support tools
+				},
+			}
 		default:
 			return getProviderData(anthropicModels, anthropicDefaultModelId)
 	}
webview-ui/src/types/vscode.d.ts (new file, vendored, 8 lines)
@@ -0,0 +1,8 @@
+declare namespace vscode {
+	interface LanguageModelChatSelector {
+		vendor?: string;
+		family?: string;
+		version?: string;
+		id?: string;
+	}
+}