Spacing adjustments

Matt Rubens
2025-01-08 17:21:23 -05:00
parent d5fd2bbb82
commit 7137f8c528
8 changed files with 787 additions and 860 deletions


@@ -15,43 +15,40 @@ import { VsCodeLmHandler } from "./providers/vscode-lm"
import { ApiStream } from "./transform/stream"
export interface SingleCompletionHandler {
completePrompt(prompt: string): Promise<string>
}
export interface ApiHandler {
createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
getModel(): { id: string; info: ModelInfo }
}
export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
const { apiProvider, ...options } = configuration
switch (apiProvider) {
case "anthropic":
return new AnthropicHandler(options)
case "glama":
return new GlamaHandler(options)
case "openrouter":
return new OpenRouterHandler(options)
case "bedrock":
return new AwsBedrockHandler(options)
case "vertex":
return new VertexHandler(options)
case "openai":
return new OpenAiHandler(options)
case "ollama":
return new OllamaHandler(options)
case "lmstudio":
return new LmStudioHandler(options)
case "gemini":
return new GeminiHandler(options)
case "openai-native":
return new OpenAiNativeHandler(options)
case "deepseek":
return new DeepSeekHandler(options)
case "vscode-lm":
return new VsCodeLmHandler(options)
default:
return new AnthropicHandler(options)
}
const { apiProvider, ...options } = configuration
switch (apiProvider) {
case "anthropic":
return new AnthropicHandler(options)
case "glama":
return new GlamaHandler(options)
case "openrouter":
return new OpenRouterHandler(options)
case "bedrock":
return new AwsBedrockHandler(options)
case "vertex":
return new VertexHandler(options)
case "openai":
return new OpenAiHandler(options)
case "ollama":
return new OllamaHandler(options)
case "lmstudio":
return new LmStudioHandler(options)
case "gemini":
return new GeminiHandler(options)
case "openai-native":
return new OpenAiNativeHandler(options)
case "deepseek":
return new DeepSeekHandler(options)
default:
return new AnthropicHandler(options)
}
}
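
For orientation, buildApiHandler is a plain factory over the provider-specific handlers. A minimal usage sketch, assuming an Anthropic configuration (the apiKey and apiModelId values below are illustrative, not taken from this diff):

const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
// Build a handler for the configured provider; unknown providers fall through to Anthropic.
const handler: ApiHandler = buildApiHandler({
	apiProvider: "anthropic",
	apiKey: "sk-ant-...",
	apiModelId: "claude-3-5-sonnet-20241022",
})
const { id, info } = handler.getModel()
// createMessage returns an ApiStream of response chunks for the given system prompt and history.
const stream: ApiStream = handler.createMessage("You are a helpful assistant.", messages)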

File diff suppressed because it is too large


@@ -5,205 +5,205 @@ import * as vscode from 'vscode';
* Safely converts a value into a plain object.
*/
function asObjectSafe(value: any): object {
	// Handle null/undefined
	if (!value) {
		return {};
	}
	try {
		// Handle strings that might be JSON
		if (typeof value === 'string') {
			return JSON.parse(value);
		}
		// Handle pre-existing objects
		if (typeof value === 'object') {
			return Object.assign({}, value);
		}
		return {};
	}
	catch (error) {
		console.warn('Cline <Language Model API>: Failed to parse object:', error);
		return {};
	}
}
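
Because tool-call input can arrive as a JSON string, an already-parsed object, or nothing at all, asObjectSafe normalizes all of these. Roughly, with illustrative values:

asObjectSafe('{"path": "src/index.ts"}'); // → { path: "src/index.ts" }
asObjectSafe({ path: "src/index.ts" });   // → shallow copy of the object
asObjectSafe(undefined);                  // → {}
asObjectSafe("not json");                 // → {} (the parse failure is logged as a warning)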
export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
	const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];
	for (const anthropicMessage of anthropicMessages) {
		// Handle simple string messages
		if (typeof anthropicMessage.content === "string") {
			vsCodeLmMessages.push(
				anthropicMessage.role === "assistant"
					? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
					: vscode.LanguageModelChatMessage.User(anthropicMessage.content)
			);
			continue;
		}
		// Handle complex message structures
		switch (anthropicMessage.role) {
			case "user": {
				const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
					nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
					toolMessages: Anthropic.ToolResultBlockParam[];
				}>(
					(acc, part) => {
						if (part.type === "tool_result") {
							acc.toolMessages.push(part);
						}
						else if (part.type === "text" || part.type === "image") {
							acc.nonToolMessages.push(part);
						}
						return acc;
					},
					{ nonToolMessages: [], toolMessages: [] },
				);
				// Process tool messages first then non-tool messages
				const contentParts = [
					// Convert tool messages to ToolResultParts
					...toolMessages.map((toolMessage) => {
						// Process tool result content into TextParts
						const toolContentParts: vscode.LanguageModelTextPart[] = (
							typeof toolMessage.content === "string"
								? [new vscode.LanguageModelTextPart(toolMessage.content)]
								: (
									toolMessage.content?.map((part) => {
										if (part.type === "image") {
											return new vscode.LanguageModelTextPart(
												`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
											);
										}
										return new vscode.LanguageModelTextPart(part.text);
									})
									?? [new vscode.LanguageModelTextPart("")]
								)
						);
						return new vscode.LanguageModelToolResultPart(
							toolMessage.tool_use_id,
							toolContentParts
						);
					}),
					// Convert non-tool messages to TextParts after tool messages
					...nonToolMessages.map((part) => {
						if (part.type === "image") {
							return new vscode.LanguageModelTextPart(
								`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
							);
						}
						return new vscode.LanguageModelTextPart(part.text);
					})
				];
				// Add single user message with all content parts
				vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
				break;
			}
			case "assistant": {
				const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
					nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
					toolMessages: Anthropic.ToolUseBlockParam[];
				}>(
					(acc, part) => {
						if (part.type === "tool_use") {
							acc.toolMessages.push(part);
						}
						else if (part.type === "text" || part.type === "image") {
							acc.nonToolMessages.push(part);
						}
						return acc;
					},
					{ nonToolMessages: [], toolMessages: [] },
				);
				// Process tool messages first then non-tool messages
				const contentParts = [
					// Convert tool messages to ToolCallParts first
					...toolMessages.map((toolMessage) =>
						new vscode.LanguageModelToolCallPart(
							toolMessage.id,
							toolMessage.name,
							asObjectSafe(toolMessage.input)
						)
					),
					// Convert non-tool messages to TextParts after tool messages
					...nonToolMessages.map((part) => {
						if (part.type === "image") {
							return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
						}
						return new vscode.LanguageModelTextPart(part.text);
					})
				];
				// Add the assistant message to the list of messages
				vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
				break;
			}
		}
	}
	return vsCodeLmMessages;
}
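
As a rough sketch of the mapping (the history below is illustrative): a plain string turn becomes a single User or Assistant message, while a user turn carrying a tool_result becomes one User message whose parts are LanguageModelToolResultParts followed by LanguageModelTextParts.

const anthropicHistory: Anthropic.Messages.MessageParam[] = [
	{ role: "user", content: "List the files in src/" },
	{
		role: "user",
		content: [
			{ type: "tool_result", tool_use_id: "toolu_01", content: "index.ts\nutils.ts" },
			{ type: "text", text: "Continue from these results." },
		],
	},
];
const lmMessages = convertToVsCodeLmMessages(anthropicHistory);
// lmMessages[0] → User("List the files in src/")
// lmMessages[1] → User([ToolResultPart for "toolu_01", TextPart("Continue from these results.")])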
export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
	switch (vsCodeLmMessageRole) {
		case vscode.LanguageModelChatMessageRole.Assistant:
			return "assistant";
		case vscode.LanguageModelChatMessageRole.User:
			return "user";
		default:
			return null;
	}
}
export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
	const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
	if (anthropicRole !== "assistant") {
		throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
	}
	return {
		id: crypto.randomUUID(),
		type: "message",
		model: "vscode-lm",
		role: anthropicRole,
		content: (
			vsCodeLmMessage.content
				.map((part): Anthropic.ContentBlock | null => {
					if (part instanceof vscode.LanguageModelTextPart) {
						return {
							type: "text",
							text: part.value
						};
					}
					if (part instanceof vscode.LanguageModelToolCallPart) {
						return {
							type: "tool_use",
							id: part.callId || crypto.randomUUID(),
							name: part.name,
							input: asObjectSafe(part.input)
						};
					}
					return null;
				})
				.filter(
					(part): part is Anthropic.ContentBlock => part !== null
				)
		),
		stop_reason: null,
		stop_sequence: null,
		usage: {
			input_tokens: 0,
			output_tokens: 0,
		}
	};
}
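
In the opposite direction, a sketch under the same assumptions (the tool name and call id are made up): an assistant reply containing a text part and a tool call is wrapped in a synthetic Anthropic message with zeroed token usage.

// Inside an async context:
const assistantReply = vscode.LanguageModelChatMessage.Assistant([
	new vscode.LanguageModelTextPart("Reading the file now."),
	new vscode.LanguageModelToolCallPart("call_1", "read_file", { path: "src/index.ts" }),
]);
const anthropicMessage = await convertToAnthropicMessage(assistantReply);
// anthropicMessage.content → [{ type: "text", ... }, { type: "tool_use", id: "call_1", name: "read_file", ... }]
// anthropicMessage.usage   → { input_tokens: 0, output_tokens: 0 }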


@@ -45,7 +45,6 @@ type SecretKey =
| "geminiApiKey"
| "openAiNativeApiKey"
| "deepSeekApiKey"
type GlobalStateKey =
| "apiProvider"
| "apiModelId"
@@ -482,72 +481,6 @@ export class ClineProvider implements vscode.WebviewViewProvider {
break
case "apiConfiguration":
if (message.apiConfiguration) {
const {
apiProvider,
apiModelId,
apiKey,
glamaModelId,
glamaModelInfo,
glamaApiKey,
openRouterApiKey,
awsAccessKey,
awsSecretKey,
awsSessionToken,
awsRegion,
awsUseCrossRegionInference,
vertexProjectId,
vertexRegion,
openAiBaseUrl,
openAiApiKey,
openAiModelId,
ollamaModelId,
ollamaBaseUrl,
lmStudioModelId,
lmStudioBaseUrl,
anthropicBaseUrl,
geminiApiKey,
openAiNativeApiKey,
azureApiVersion,
openAiStreamingEnabled,
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
} = message.apiConfiguration
await this.updateGlobalState("apiProvider", apiProvider)
await this.updateGlobalState("apiModelId", apiModelId)
await this.storeSecret("apiKey", apiKey)
await this.updateGlobalState("glamaModelId", glamaModelId)
await this.updateGlobalState("glamaModelInfo", glamaModelInfo)
await this.storeSecret("glamaApiKey", glamaApiKey)
await this.storeSecret("openRouterApiKey", openRouterApiKey)
await this.storeSecret("awsAccessKey", awsAccessKey)
await this.storeSecret("awsSecretKey", awsSecretKey)
await this.storeSecret("awsSessionToken", awsSessionToken)
await this.updateGlobalState("awsRegion", awsRegion)
await this.updateGlobalState("awsUseCrossRegionInference", awsUseCrossRegionInference)
await this.updateGlobalState("vertexProjectId", vertexProjectId)
await this.updateGlobalState("vertexRegion", vertexRegion)
await this.updateGlobalState("openAiBaseUrl", openAiBaseUrl)
await this.storeSecret("openAiApiKey", openAiApiKey)
await this.updateGlobalState("openAiModelId", openAiModelId)
await this.updateGlobalState("ollamaModelId", ollamaModelId)
await this.updateGlobalState("ollamaBaseUrl", ollamaBaseUrl)
await this.updateGlobalState("lmStudioModelId", lmStudioModelId)
await this.updateGlobalState("lmStudioBaseUrl", lmStudioBaseUrl)
await this.updateGlobalState("anthropicBaseUrl", anthropicBaseUrl)
await this.storeSecret("geminiApiKey", geminiApiKey)
await this.storeSecret("openAiNativeApiKey", openAiNativeApiKey)
await this.storeSecret("deepSeekApiKey", message.apiConfiguration.deepSeekApiKey)
await this.updateGlobalState("azureApiVersion", azureApiVersion)
await this.updateGlobalState("openAiStreamingEnabled", openAiStreamingEnabled)
await this.updateGlobalState("openRouterModelId", openRouterModelId)
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
if (this.cline) {
this.cline.api = buildApiHandler(message.apiConfiguration)
}
await this.updateApiConfiguration(message.apiConfiguration)
}
await this.postStateToWebview()
@@ -1016,6 +949,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId,
openRouterModelInfo,
openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
} = apiConfiguration
await this.updateGlobalState("apiProvider", apiProvider)
await this.updateGlobalState("apiModelId", apiModelId)
@@ -1047,6 +981,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("openRouterModelId", openRouterModelId)
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
if (this.cline) {
this.cline.api = buildApiHandler(apiConfiguration)
}


@@ -36,7 +36,7 @@ export function activate(context: vscode.ExtensionContext) {
context.globalState.update('allowedCommands', defaultCommands);
}
const sidebarProvider = new ClineProvider(context, outputChannel);
const sidebarProvider = new ClineProvider(context, outputChannel)
context.subscriptions.push(
vscode.window.registerWebviewViewProvider(ClineProvider.sideBarId, sidebarProvider, {


@@ -7,7 +7,6 @@ import { GitCommit } from "../utils/git"
// webview will hold state
export interface ExtensionMessage {
type:
| "action"
| "state"
@@ -26,15 +25,15 @@ export interface ExtensionMessage {
| "commitSearchResults"
| "listApiConfig"
| "vsCodeLmModels"
| "vsCodeLmApiAvailable"
| "requestVsCodeLmModels"
| "vsCodeLmApiAvailable"
| "requestVsCodeLmModels"
text?: string
action?:
| "chatButtonClicked"
| "mcpButtonClicked"
| "settingsButtonClicked"
| "historyButtonClicked"
| "didBecomeVisible"
| "chatButtonClicked"
| "mcpButtonClicked"
| "settingsButtonClicked"
| "historyButtonClicked"
| "didBecomeVisible"
invoke?: "sendMessage" | "primaryButtonClick" | "secondaryButtonClick"
state?: ExtensionState
images?: string[]
@@ -131,14 +130,14 @@ export type ClineSay =
export interface ClineSayTool {
tool:
| "editedExistingFile"
| "appliedDiff"
| "newFileCreated"
| "readFile"
| "listFilesTopLevel"
| "listFilesRecursive"
| "listCodeDefinitionNames"
| "searchFiles"
| "editedExistingFile"
| "appliedDiff"
| "newFileCreated"
| "readFile"
| "listFilesTopLevel"
| "listFilesRecursive"
| "listCodeDefinitionNames"
| "searchFiles"
path?: string
diff?: string
content?: string


@@ -65,7 +65,7 @@ export interface ModelInfo {
contextWindow: number
supportsImages?: boolean
supportsComputerUse?: boolean
supportsPromptCache: boolean
supportsPromptCache: boolean // this value is hardcoded for now
inputPrice?: number
outputPrice?: number
cacheWritesPrice?: number
@@ -124,24 +124,24 @@ export const anthropicModels = {
// AWS Bedrock
// https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
export interface MessageContent {
	type: 'text' | 'image' | 'video' | 'tool_use' | 'tool_result';
	text?: string;
	source?: {
		type: 'base64';
		data: string | Uint8Array; // string for Anthropic, Uint8Array for Bedrock
		media_type: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp';
	};
	// Video specific fields
	format?: string;
	s3Location?: {
		uri: string;
		bucketOwner?: string;
	};
	// Tool use and result fields
	toolUseId?: string;
	name?: string;
	input?: any;
	output?: any; // Used for tool_result type
}
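
For orientation, a sketch of what an image block might look like under this interface (the base64 payload is abbreviated and illustrative); Anthropic callers pass data as a string while Bedrock callers pass a Uint8Array:

const imageBlock: MessageContent = {
	type: 'image',
	source: {
		type: 'base64',
		data: 'iVBORw0KGgoAAAANSUhEUg...', // truncated PNG payload, illustrative only
		media_type: 'image/png',
	},
}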
export type BedrockModelId = keyof typeof bedrockModels
@@ -235,7 +235,7 @@ export const bedrockModels = {
inputPrice: 0.25,
outputPrice: 1.25,
},
"meta.llama3-2-90b-instruct-v1:0": {
"meta.llama3-2-90b-instruct-v1:0" : {
maxTokens: 8192,
contextWindow: 128_000,
supportsImages: true,
@@ -244,7 +244,7 @@ export const bedrockModels = {
inputPrice: 0.72,
outputPrice: 0.72,
},
"meta.llama3-2-11b-instruct-v1:0": {
"meta.llama3-2-11b-instruct-v1:0" : {
maxTokens: 8192,
contextWindow: 128_000,
supportsImages: true,
@@ -253,7 +253,7 @@ export const bedrockModels = {
inputPrice: 0.16,
outputPrice: 0.16,
},
"meta.llama3-2-3b-instruct-v1:0": {
"meta.llama3-2-3b-instruct-v1:0" : {
maxTokens: 8192,
contextWindow: 128_000,
supportsImages: false,
@@ -262,7 +262,7 @@ export const bedrockModels = {
inputPrice: 0.15,
outputPrice: 0.15,
},
"meta.llama3-2-1b-instruct-v1:0": {
"meta.llama3-2-1b-instruct-v1:0" : {
maxTokens: 8192,
contextWindow: 128_000,
supportsImages: false,
@@ -271,7 +271,7 @@ export const bedrockModels = {
inputPrice: 0.1,
outputPrice: 0.1,
},
"meta.llama3-1-405b-instruct-v1:0": {
"meta.llama3-1-405b-instruct-v1:0" : {
maxTokens: 8192,
contextWindow: 128_000,
supportsImages: false,
@@ -280,7 +280,7 @@ export const bedrockModels = {
inputPrice: 2.4,
outputPrice: 2.4,
},
"meta.llama3-1-70b-instruct-v1:0": {
"meta.llama3-1-70b-instruct-v1:0" : {
maxTokens: 8192,
contextWindow: 128_000,
supportsImages: false,
@@ -289,7 +289,7 @@ export const bedrockModels = {
inputPrice: 0.72,
outputPrice: 0.72,
},
"meta.llama3-1-8b-instruct-v1:0": {
"meta.llama3-1-8b-instruct-v1:0" : {
maxTokens: 8192,
contextWindow: 8_000,
supportsImages: false,
@@ -298,8 +298,8 @@ export const bedrockModels = {
inputPrice: 0.22,
outputPrice: 0.22,
},
"meta.llama3-70b-instruct-v1:0": {
maxTokens: 2048,
"meta.llama3-70b-instruct-v1:0" : {
maxTokens: 2048 ,
contextWindow: 8_000,
supportsImages: false,
supportsComputerUse: false,
@@ -307,8 +307,8 @@ export const bedrockModels = {
inputPrice: 2.65,
outputPrice: 3.5,
},
"meta.llama3-8b-instruct-v1:0": {
maxTokens: 2048,
"meta.llama3-8b-instruct-v1:0" : {
maxTokens: 2048 ,
contextWindow: 4_000,
supportsImages: false,
supportsComputerUse: false,
@@ -548,3 +548,4 @@ export const deepSeekModels = {
// https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"


@@ -3,12 +3,12 @@ import { LanguageModelChatSelector } from 'vscode';
export const SELECTOR_SEPARATOR = '/';
export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
	return [
		selector.vendor,
		selector.family,
		selector.version,
		selector.id
	]
		.filter(Boolean)
		.join(SELECTOR_SEPARATOR);
}
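
A sketch of the resulting identifiers (the selector values are illustrative): unset fields are dropped by the Boolean filter, so partial selectors still stringify cleanly.

stringifyVsCodeLmModelSelector({ vendor: 'copilot', family: 'gpt-4o' });                    // "copilot/gpt-4o"
stringifyVsCodeLmModelSelector({ vendor: 'copilot', family: 'gpt-4o', version: 'latest' }); // "copilot/gpt-4o/latest"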