Merge remote-tracking branch 'origin/main' into fix/roo-cline-select-api-config
.changeset/eleven-papayas-fold.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"roo-cline": patch
---

Experimental support for VS Code Language Models (thanks @RaySinner / @julesmons!)
.github/workflows/changeset-release.yml (vendored, 9 lines changed)
@@ -2,20 +2,23 @@ name: Changeset Release
 run-name: Changeset Release ${{ github.actor != 'R00-B0T' && '- Create PR' || '- Update Changelog' }}

 on:
+    workflow_dispatch:
     pull_request:
         types: [closed, opened, labeled]

 env:
     REPO_PATH: ${{ github.repository }}
+    GIT_REF: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || 'main' }}

 jobs:
     # Job 1: Create version bump PR when changesets are merged to main
     changeset-pr-version-bump:
         if: >
-            github.event_name == 'pull_request' &&
+            ( github.event_name == 'pull_request' &&
             github.event.pull_request.merged == true &&
             github.event.pull_request.base.ref == 'main' &&
-            github.actor != 'R00-B0T'
+            github.actor != 'R00-B0T' ) ||
+            github.event_name == 'workflow_dispatch'
         runs-on: ubuntu-latest
         permissions:
             contents: write
@@ -25,7 +28,7 @@ jobs:
             uses: actions/checkout@v4
             with:
                 fetch-depth: 0
-                ref: ${{ github.event.pull_request.head.sha }}
+                ref: ${{ env.GIT_REF }}

         - name: Setup Node.js
           uses: actions/setup-node@v4
@@ -60,6 +60,7 @@ Give it a try and let us know what you think in the reddit: https://www.reddit.c
 - Support for Glama
 - Support for listing models from OpenAI-compatible providers
 - Support for adding OpenAI-compatible models with or without streaming
+- Experimental support for VS Code Language Models (e.g. Copilot)
 - Per-tool MCP auto-approval
 - Enable/disable individual MCP servers
 - Enable/disable the MCP feature overall
@@ -143,7 +144,7 @@ Once your merge is successful:
 <table>
 <tbody>
 <td align="center">
-<a href="https://marketplace.visualstudio.com/items?itemName=saoudrizwan.claude-dev" target="_blank"><strong>Download on VS Marketplace</strong></a>
+<a href="https://marketplace.visualstudio.com/items?itemName=rooveterinaryinc.roo-cline" target="_blank"><strong>Download on VS Marketplace</strong></a>
 </td>
 <td align="center">
 <a href="https://discord.gg/cline" target="_blank"><strong>Join the Discord</strong></a>
package-lock.json (generated, 8 lines changed)
@@ -31,7 +31,7 @@
         "isbinaryfile": "^5.0.2",
         "mammoth": "^1.8.0",
         "monaco-vscode-textmate-theme-converter": "^0.1.7",
-        "openai": "^4.73.1",
+        "openai": "^4.78.1",
         "os-name": "^6.0.0",
         "p-wait-for": "^5.0.2",
         "pdf-parse": "^1.1.1",
@@ -12546,9 +12546,9 @@
             }
         },
         "node_modules/openai": {
-            "version": "4.76.0",
-            "resolved": "https://registry.npmjs.org/openai/-/openai-4.76.0.tgz",
-            "integrity": "sha512-QBGIetjX1C9xDp5XGa/3mPnfKI9BgAe2xHQX6PmO98wuW9qQaurBaumcYptQWc9LHZZq7cH/Y1Rjnsr6uUDdVw==",
+            "version": "4.78.1",
+            "resolved": "https://registry.npmjs.org/openai/-/openai-4.78.1.tgz",
+            "integrity": "sha512-drt0lHZBd2lMyORckOXFPQTmnGLWSLt8VK0W9BhOKWpMFBEoHMoz5gxMPmVq5icp+sOrsbMnsmZTVHUlKvD1Ow==",
             "dependencies": {
                 "@types/node": "^18.11.18",
                 "@types/node-fetch": "^2.6.4",
package.json (21 lines changed)
@@ -42,7 +42,10 @@
         "ai",
         "llama"
     ],
-    "activationEvents": [],
+    "activationEvents": [
+        "onLanguage",
+        "onStartupFinished"
+    ],
     "main": "./dist/extension.js",
     "contributes": {
         "viewsContainers": {
@@ -151,6 +154,20 @@
                         "git show"
                     ],
                     "description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
+                },
+                "roo-cline.vsCodeLmModelSelector": {
+                    "type": "object",
+                    "properties": {
+                        "vendor": {
+                            "type": "string",
+                            "description": "The vendor of the language model (e.g. copilot)"
+                        },
+                        "family": {
+                            "type": "string",
+                            "description": "The family of the language model (e.g. gpt-4)"
+                        }
+                    },
+                    "description": "Settings for VSCode Language Model API"
                 }
             }
         }
@@ -227,7 +244,7 @@
         "isbinaryfile": "^5.0.2",
         "mammoth": "^1.8.0",
         "monaco-vscode-textmate-theme-converter": "^0.1.7",
-        "openai": "^4.73.1",
+        "openai": "^4.78.1",
         "os-name": "^6.0.0",
         "p-wait-for": "^5.0.2",
         "pdf-parse": "^1.1.1",
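The `roo-cline.vsCodeLmModelSelector` setting added above maps directly onto the VS Code LM API's model selector. As a rough illustration (the `copilot`/`gpt-4o` values are hypothetical examples, not defaults from this commit), resolving such a selector to a concrete chat model looks like this:

import * as vscode from 'vscode';

// Resolve a configured selector to an installed chat model.
// selectChatModels returns every matching model, or an empty array if none match.
async function pickConfiguredModel(): Promise<vscode.LanguageModelChat | undefined> {
    const selector: vscode.LanguageModelChatSelector = { vendor: 'copilot', family: 'gpt-4o' };
    const models = await vscode.lm.selectChatModels(selector);
    return models[0];
}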
@@ -11,6 +11,7 @@ import { LmStudioHandler } from "./providers/lmstudio"
 import { GeminiHandler } from "./providers/gemini"
 import { OpenAiNativeHandler } from "./providers/openai-native"
 import { DeepSeekHandler } from "./providers/deepseek"
+import { VsCodeLmHandler } from "./providers/vscode-lm"
 import { ApiStream } from "./transform/stream"

 export interface SingleCompletionHandler {
@@ -60,6 +60,13 @@ jest.mock('openai', () => {
 describe('OpenAiNativeHandler', () => {
     let handler: OpenAiNativeHandler;
     let mockOptions: ApiHandlerOptions;
+    const systemPrompt = 'You are a helpful assistant.';
+    const messages: Anthropic.Messages.MessageParam[] = [
+        {
+            role: 'user',
+            content: 'Hello!'
+        }
+    ];

     beforeEach(() => {
         mockOptions = {
@@ -86,14 +93,6 @@ describe('OpenAiNativeHandler', () => {
     });

     describe('createMessage', () => {
-        const systemPrompt = 'You are a helpful assistant.';
-        const messages: Anthropic.Messages.MessageParam[] = [
-            {
-                role: 'user',
-                content: 'Hello!'
-            }
-        ];
-
         it('should handle streaming responses', async () => {
             const stream = handler.createMessage(systemPrompt, messages);
             const chunks: any[] = [];
@@ -109,15 +108,126 @@ describe('OpenAiNativeHandler', () => {
 
         it('should handle API errors', async () => {
             mockCreate.mockRejectedValueOnce(new Error('API Error'));

             const stream = handler.createMessage(systemPrompt, messages);

             await expect(async () => {
                 for await (const chunk of stream) {
                     // Should not reach here
                 }
             }).rejects.toThrow('API Error');
         });
+
+        it('should handle missing content in response for o1 model', async () => {
+            // Use o1 model which supports developer role
+            handler = new OpenAiNativeHandler({
+                ...mockOptions,
+                apiModelId: 'o1'
+            });
+
+            mockCreate.mockResolvedValueOnce({
+                choices: [{ message: { content: null } }],
+                usage: {
+                    prompt_tokens: 0,
+                    completion_tokens: 0,
+                    total_tokens: 0
+                }
+            });
+
+            const generator = handler.createMessage(systemPrompt, messages);
+            const results = [];
+            for await (const result of generator) {
+                results.push(result);
+            }
+
+            expect(results).toEqual([
+                { type: 'text', text: '' },
+                { type: 'usage', inputTokens: 0, outputTokens: 0 }
+            ]);
+
+            // Verify developer role is used for system prompt with o1 model
+            expect(mockCreate).toHaveBeenCalledWith({
+                model: 'o1',
+                messages: [
+                    { role: 'developer', content: systemPrompt },
+                    { role: 'user', content: 'Hello!' }
+                ]
+            });
+        });
+    });
+
+    describe('streaming models', () => {
+        beforeEach(() => {
+            handler = new OpenAiNativeHandler({
+                ...mockOptions,
+                apiModelId: 'gpt-4o',
+            });
+        });
+
+        it('should handle streaming response', async () => {
+            const mockStream = [
+                { choices: [{ delta: { content: 'Hello' } }], usage: null },
+                { choices: [{ delta: { content: ' there' } }], usage: null },
+                { choices: [{ delta: { content: '!' } }], usage: { prompt_tokens: 10, completion_tokens: 5 } },
+            ];
+
+            mockCreate.mockResolvedValueOnce(
+                (async function* () {
+                    for (const chunk of mockStream) {
+                        yield chunk;
+                    }
+                })()
+            );
+
+            const generator = handler.createMessage(systemPrompt, messages);
+            const results = [];
+            for await (const result of generator) {
+                results.push(result);
+            }
+
+            expect(results).toEqual([
+                { type: 'text', text: 'Hello' },
+                { type: 'text', text: ' there' },
+                { type: 'text', text: '!' },
+                { type: 'usage', inputTokens: 10, outputTokens: 5 },
+            ]);
+
+            expect(mockCreate).toHaveBeenCalledWith({
+                model: 'gpt-4o',
+                temperature: 0,
+                messages: [
+                    { role: 'system', content: systemPrompt },
+                    { role: 'user', content: 'Hello!' },
+                ],
+                stream: true,
+                stream_options: { include_usage: true },
+            });
+        });
+
+        it('should handle empty delta content', async () => {
+            const mockStream = [
+                { choices: [{ delta: {} }], usage: null },
+                { choices: [{ delta: { content: null } }], usage: null },
+                { choices: [{ delta: { content: 'Hello' } }], usage: { prompt_tokens: 10, completion_tokens: 5 } },
+            ];
+
+            mockCreate.mockResolvedValueOnce(
+                (async function* () {
+                    for (const chunk of mockStream) {
+                        yield chunk;
+                    }
+                })()
+            );
+
+            const generator = handler.createMessage(systemPrompt, messages);
+            const results = [];
+            for await (const result of generator) {
+                results.push(result);
+            }
+
+            expect(results).toEqual([
+                { type: 'text', text: 'Hello' },
+                { type: 'usage', inputTokens: 10, outputTokens: 5 },
+            ]);
+        });
     });

     describe('completePrompt', () => {
@@ -206,4 +316,4 @@ describe('OpenAiNativeHandler', () => {
             expect(modelInfo.info).toBeDefined();
         });
     });
 });
src/api/providers/__tests__/vscode-lm.test.ts (new file, 289 lines)
@@ -0,0 +1,289 @@
import * as vscode from 'vscode';
import { VsCodeLmHandler } from '../vscode-lm';
import { ApiHandlerOptions } from '../../../shared/api';
import { Anthropic } from '@anthropic-ai/sdk';

// Mock vscode namespace
jest.mock('vscode', () => {
    class MockLanguageModelTextPart {
        type = 'text';
        constructor(public value: string) {}
    }

    class MockLanguageModelToolCallPart {
        type = 'tool_call';
        constructor(
            public callId: string,
            public name: string,
            public input: any
        ) {}
    }

    return {
        workspace: {
            onDidChangeConfiguration: jest.fn((callback) => ({
                dispose: jest.fn()
            }))
        },
        CancellationTokenSource: jest.fn(() => ({
            token: {
                isCancellationRequested: false,
                onCancellationRequested: jest.fn()
            },
            cancel: jest.fn(),
            dispose: jest.fn()
        })),
        CancellationError: class CancellationError extends Error {
            constructor() {
                super('Operation cancelled');
                this.name = 'CancellationError';
            }
        },
        LanguageModelChatMessage: {
            Assistant: jest.fn((content) => ({
                role: 'assistant',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            })),
            User: jest.fn((content) => ({
                role: 'user',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            }))
        },
        LanguageModelTextPart: MockLanguageModelTextPart,
        LanguageModelToolCallPart: MockLanguageModelToolCallPart,
        lm: {
            selectChatModels: jest.fn()
        }
    };
});

const mockLanguageModelChat = {
    id: 'test-model',
    name: 'Test Model',
    vendor: 'test-vendor',
    family: 'test-family',
    version: '1.0',
    maxInputTokens: 4096,
    sendRequest: jest.fn(),
    countTokens: jest.fn()
};

describe('VsCodeLmHandler', () => {
    let handler: VsCodeLmHandler;
    const defaultOptions: ApiHandlerOptions = {
        vsCodeLmModelSelector: {
            vendor: 'test-vendor',
            family: 'test-family'
        }
    };

    beforeEach(() => {
        jest.clearAllMocks();
        handler = new VsCodeLmHandler(defaultOptions);
    });

    afterEach(() => {
        handler.dispose();
    });

    describe('constructor', () => {
        it('should initialize with provided options', () => {
            expect(handler).toBeDefined();
            expect(vscode.workspace.onDidChangeConfiguration).toHaveBeenCalled();
        });

        it('should handle configuration changes', () => {
            const callback = (vscode.workspace.onDidChangeConfiguration as jest.Mock).mock.calls[0][0];
            callback({ affectsConfiguration: () => true });
            // Should reset client when config changes
            expect(handler['client']).toBeNull();
        });
    });

    describe('createClient', () => {
        it('should create client with selector', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            const client = await handler['createClient']({
                vendor: 'test-vendor',
                family: 'test-family'
            });

            expect(client).toBeDefined();
            expect(client.id).toBe('test-model');
            expect(vscode.lm.selectChatModels).toHaveBeenCalledWith({
                vendor: 'test-vendor',
                family: 'test-family'
            });
        });

        it('should return default client when no models available', async () => {
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([]);

            const client = await handler['createClient']({});

            expect(client).toBeDefined();
            expect(client.id).toBe('default-lm');
            expect(client.vendor).toBe('vscode');
        });
    });

    describe('createMessage', () => {
        beforeEach(() => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
            mockLanguageModelChat.countTokens.mockResolvedValue(10);
        });

        it('should stream text responses', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Hello'
            }];

            const responseText = 'Hello! How can I help you?';
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelTextPart(responseText);
                    return;
                })(),
                text: (async function* () {
                    yield responseText;
                    return;
                })()
            });

            const stream = handler.createMessage(systemPrompt, messages);
            const chunks = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks).toHaveLength(2); // Text chunk + usage chunk
            expect(chunks[0]).toEqual({
                type: 'text',
                text: responseText
            });
            expect(chunks[1]).toMatchObject({
                type: 'usage',
                inputTokens: expect.any(Number),
                outputTokens: expect.any(Number)
            });
        });

        it('should handle tool calls', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Calculate 2+2'
            }];

            const toolCallData = {
                name: 'calculator',
                arguments: { operation: 'add', numbers: [2, 2] },
                callId: 'call-1'
            };

            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelToolCallPart(
                        toolCallData.callId,
                        toolCallData.name,
                        toolCallData.arguments
                    );
                    return;
                })(),
                text: (async function* () {
                    yield JSON.stringify({ type: 'tool_call', ...toolCallData });
                    return;
                })()
            });

            const stream = handler.createMessage(systemPrompt, messages);
            const chunks = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks).toHaveLength(2); // Tool call chunk + usage chunk
            expect(chunks[0]).toEqual({
                type: 'text',
                text: JSON.stringify({ type: 'tool_call', ...toolCallData })
            });
        });

        it('should handle errors', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Hello'
            }];

            mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('API Error'));

            await expect(async () => {
                const stream = handler.createMessage(systemPrompt, messages);
                for await (const _ of stream) {
                    // consume stream
                }
            }).rejects.toThrow('API Error');
        });
    });

    describe('getModel', () => {
        it('should return model info when client exists', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            // Initialize client
            await handler['getClient']();

            const model = handler.getModel();
            expect(model.id).toBe('test-model');
            expect(model.info).toBeDefined();
            expect(model.info.contextWindow).toBe(4096);
        });

        it('should return fallback model info when no client exists', () => {
            const model = handler.getModel();
            expect(model.id).toBe('test-vendor/test-family');
            expect(model.info).toBeDefined();
        });
    });

    describe('completePrompt', () => {
        it('should complete single prompt', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            const responseText = 'Completed text';
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelTextPart(responseText);
                    return;
                })(),
                text: (async function* () {
                    yield responseText;
                    return;
                })()
            });

            const result = await handler.completePrompt('Test prompt');
            expect(result).toBe(responseText);
            expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled();
        });

        it('should handle errors during completion', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('Completion failed'));

            await expect(handler.completePrompt('Test prompt'))
                .rejects
                .toThrow('VSCode LM completion error: Completion failed');
        });
    });
});
@@ -23,14 +23,16 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
     }

     async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-        switch (this.getModel().id) {
+        const modelId = this.getModel().id
+        switch (modelId) {
             case "o1":
             case "o1-preview":
             case "o1-mini": {
-                // o1 doesnt support streaming, non-1 temp, or system prompt
+                // o1-preview and o1-mini don't support streaming, non-1 temp, or system prompt
+                // o1 doesnt support streaming or non-1 temp but does support a developer prompt
                 const response = await this.client.chat.completions.create({
-                    model: this.getModel().id,
-                    messages: [{ role: "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+                    model: modelId,
+                    messages: [{ role: modelId === "o1" ? "developer" : "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
                 })
                 yield {
                     type: "text",
@@ -93,7 +95,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
             case "o1":
             case "o1-preview":
             case "o1-mini":
-                // o1 doesn't support non-1 temp or system prompt
+                // o1 doesn't support non-1 temp
                 requestOptions = {
                     model: modelId,
                     messages: [{ role: "user", content: prompt }]
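To make the role switch above concrete: o1 accepts the system prompt as a "developer" message, while o1-preview and o1-mini only accept it as a plain "user" message. A minimal sketch of that decision (assumed message shapes for illustration, not code from this commit):

// Chooses where the system prompt goes for the o1 family.
function systemMessageFor(modelId: string, systemPrompt: string): { role: "developer" | "user"; content: string } {
    return { role: modelId === "o1" ? "developer" : "user", content: systemPrompt };
}

// systemMessageFor("o1", "Be concise")      -> { role: "developer", content: "Be concise" }
// systemMessageFor("o1-mini", "Be concise") -> { role: "user", content: "Be concise" }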
src/api/providers/vscode-lm.ts (new file, 564 lines)
@@ -0,0 +1,564 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { ApiHandler, SingleCompletionHandler } from "../";
import { calculateApiCost } from "../../utils/cost";
import { ApiStream } from "../transform/stream";
import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format";
import { SELECTOR_SEPARATOR, stringifyVsCodeLmModelSelector } from "../../shared/vsCodeSelectorUtils";
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api";

/**
 * Handles interaction with VS Code's Language Model API for chat-based operations.
 * This handler implements the ApiHandler interface to provide VS Code LM specific functionality.
 *
 * @implements {ApiHandler}
 *
 * @remarks
 * The handler manages a VS Code language model chat client and provides methods to:
 * - Create and manage chat client instances
 * - Stream messages using VS Code's Language Model API
 * - Retrieve model information
 *
 * @example
 * ```typescript
 * const options = {
 *     vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" }
 * };
 * const handler = new VsCodeLmHandler(options);
 *
 * // Stream a conversation
 * const systemPrompt = "You are a helpful assistant";
 * const messages = [{ role: "user", content: "Hello!" }];
 * for await (const chunk of handler.createMessage(systemPrompt, messages)) {
 *     console.log(chunk);
 * }
 * ```
 */
export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {

    private options: ApiHandlerOptions;
    private client: vscode.LanguageModelChat | null;
    private disposable: vscode.Disposable | null;
    private currentRequestCancellation: vscode.CancellationTokenSource | null;

    constructor(options: ApiHandlerOptions) {
        this.options = options;
        this.client = null;
        this.disposable = null;
        this.currentRequestCancellation = null;

        try {
            // Listen for model changes and reset client
            this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
                if (event.affectsConfiguration('lm')) {
                    try {
                        this.client = null;
                        this.ensureCleanState();
                    }
                    catch (error) {
                        console.error('Error during configuration change cleanup:', error);
                    }
                }
            });
        }
        catch (error) {
            // Ensure cleanup if constructor fails
            this.dispose();

            throw new Error(
                `Cline <Language Model API>: Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
            );
        }
    }

    /**
     * Creates a language model chat client based on the provided selector.
     *
     * @param selector - Selector criteria to filter language model chat instances
     * @returns Promise resolving to the first matching language model chat instance
     * @throws Error when no matching models are found with the given selector
     *
     * @example
     * const selector = { vendor: "copilot", family: "gpt-4o" };
     * const chatClient = await createClient(selector);
     */
    async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
        try {
            const models = await vscode.lm.selectChatModels(selector);

            // Use first available model or create a minimal model object
            if (models && Array.isArray(models) && models.length > 0) {
                return models[0];
            }

            // Create a minimal model if no models are available
            return {
                id: 'default-lm',
                name: 'Default Language Model',
                vendor: 'vscode',
                family: 'lm',
                version: '1.0',
                maxInputTokens: 8192,
                sendRequest: async (messages, options, token) => {
                    // Provide a minimal implementation
                    return {
                        stream: (async function* () {
                            yield new vscode.LanguageModelTextPart(
                                "Language model functionality is limited. Please check VS Code configuration."
                            );
                        })(),
                        text: (async function* () {
                            yield "Language model functionality is limited. Please check VS Code configuration.";
                        })()
                    };
                },
                countTokens: async () => 0
            };
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
            throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
        }
    }

    /**
     * Creates and streams a message using the VS Code Language Model API.
     *
     * @param systemPrompt - The system prompt to initialize the conversation context
     * @param messages - An array of message parameters following the Anthropic message format
     *
     * @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
     *
     * @throws {Error} When vsCodeLmModelSelector option is not provided
     * @throws {Error} When the response stream encounters an error
     *
     * @remarks
     * This method handles the initialization of the VS Code LM client if not already created,
     * converts the messages to VS Code LM format, and streams the response chunks.
     * Tool calls handling is currently a work in progress.
     */
    dispose(): void {
        if (this.disposable) {
            this.disposable.dispose();
        }

        if (this.currentRequestCancellation) {
            this.currentRequestCancellation.cancel();
            this.currentRequestCancellation.dispose();
        }
    }

    private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
        // Check for required dependencies
        if (!this.client) {
            console.warn('Cline <Language Model API>: No client available for token counting');
            return 0;
        }

        if (!this.currentRequestCancellation) {
            console.warn('Cline <Language Model API>: No cancellation token available for token counting');
            return 0;
        }

        // Validate input
        if (!text) {
            console.debug('Cline <Language Model API>: Empty text provided for token counting');
            return 0;
        }

        try {
            // Handle different input types
            let tokenCount: number;

            if (typeof text === 'string') {
                tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
            } else if (text instanceof vscode.LanguageModelChatMessage) {
                // For chat messages, ensure we have content
                if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
                    console.debug('Cline <Language Model API>: Empty chat message content');
                    return 0;
                }
                tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
            } else {
                console.warn('Cline <Language Model API>: Invalid input type for token counting');
                return 0;
            }

            // Validate the result
            if (typeof tokenCount !== 'number') {
                console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
                return 0;
            }

            if (tokenCount < 0) {
                console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
                return 0;
            }

            return tokenCount;
        }
        catch (error) {
            // Handle specific error types
            if (error instanceof vscode.CancellationError) {
                console.debug('Cline <Language Model API>: Token counting cancelled by user');
                return 0;
            }

            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
            console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);

            // Log additional error details if available
            if (error instanceof Error && error.stack) {
                console.debug('Token counting error stack:', error.stack);
            }

            return 0; // Fallback to prevent stream interruption
        }
    }

    private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
        const systemTokens: number = await this.countTokens(systemPrompt);

        const messageTokens: number[] = await Promise.all(
            vsCodeLmMessages.map(msg => this.countTokens(msg))
        );

        return systemTokens + messageTokens.reduce(
            (sum: number, tokens: number): number => sum + tokens, 0
        );
    }

    private ensureCleanState(): void {
        if (this.currentRequestCancellation) {
            this.currentRequestCancellation.cancel();
            this.currentRequestCancellation.dispose();
            this.currentRequestCancellation = null;
        }
    }

    private async getClient(): Promise<vscode.LanguageModelChat> {
        if (!this.client) {
            console.debug('Cline <Language Model API>: Getting client with options:', {
                vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
                hasOptions: !!this.options,
                selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
            });

            try {
                // Use default empty selector if none provided to get all available models
                const selector = this.options?.vsCodeLmModelSelector || {};
                console.debug('Cline <Language Model API>: Creating client with selector:', selector);
                this.client = await this.createClient(selector);
            } catch (error) {
                const message = error instanceof Error ? error.message : 'Unknown error';
                console.error('Cline <Language Model API>: Client creation failed:', message);
                throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
            }
        }

        return this.client;
    }

    private cleanTerminalOutput(text: string): string {
        if (!text) {
            return '';
        }

        return text
            // Normalize line breaks
            .replace(/\r\n/g, '\n')
            .replace(/\r/g, '\n')

            // Remove ANSI escape sequences
            .replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
            .replace(/\x9B[0-?]*[ -/]*[@-~]/g, '') // CSI sequences

            // Remove terminal title-setting and other OSC sequences
            .replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')

            // Remove control characters
            .replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')

            // Remove VS Code escape sequences
            .replace(/\x1B[PD].*?\x1B\\/g, '') // DCS sequences
            .replace(/\x1B_.*?\x1B\\/g, '') // APC sequences
            .replace(/\x1B\^.*?\x1B\\/g, '') // PM sequences
            .replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen

            // Remove Windows paths and shell housekeeping output
            .replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
            .replace(/^;?Cwd=.*$/mg, '')

            // Clean up escaped sequences
            .replace(/\\x[0-9a-fA-F]{2}/g, '')
            .replace(/\\u[0-9a-fA-F]{4}/g, '')

            // Final cleanup
            .replace(/\n{3,}/g, '\n\n') // Collapse runs of blank lines
            .trim();
    }

    private cleanMessageContent(content: any): any {
        if (!content) {
            return content;
        }

        if (typeof content === 'string') {
            return this.cleanTerminalOutput(content);
        }

        if (Array.isArray(content)) {
            return content.map(item => this.cleanMessageContent(item));
        }

        if (typeof content === 'object') {
            const cleaned: any = {};
            for (const [key, value] of Object.entries(content)) {
                cleaned[key] = this.cleanMessageContent(value);
            }
            return cleaned;
        }

        return content;
    }

    async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
        // Ensure clean state before starting a new request
        this.ensureCleanState();
        const client: vscode.LanguageModelChat = await this.getClient();

        // Clean system prompt and messages
        const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
        const cleanedMessages = messages.map(msg => ({
            ...msg,
            content: this.cleanMessageContent(msg.content)
        }));

        // Convert Anthropic messages to VS Code LM messages
        const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
            vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
            ...convertToVsCodeLmMessages(cleanedMessages),
        ];

        // Initialize cancellation token for the request
        this.currentRequestCancellation = new vscode.CancellationTokenSource();

        // Calculate input tokens before starting the stream
        const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);

        // Accumulate the text and count at the end of the stream to reduce token counting overhead.
        let accumulatedText: string = '';

        try {
            // Create the response stream with minimal required options
            const requestOptions: vscode.LanguageModelChatRequestOptions = {
                justification: `Cline would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`
            };

            // Note: Tool support is currently provided by the VSCode Language Model API directly
            // Extensions can register tools using vscode.lm.registerTool()

            const response: vscode.LanguageModelChatResponse = await client.sendRequest(
                vsCodeLmMessages,
                requestOptions,
                this.currentRequestCancellation.token
            );

            // Consume the stream and handle both text and tool call chunks
            for await (const chunk of response.stream) {
                if (chunk instanceof vscode.LanguageModelTextPart) {
                    // Validate text part value
                    if (typeof chunk.value !== 'string') {
                        console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
                        continue;
                    }

                    accumulatedText += chunk.value;
                    yield {
                        type: "text",
                        text: chunk.value,
                    };
                } else if (chunk instanceof vscode.LanguageModelToolCallPart) {
                    try {
                        // Validate tool call parameters
                        if (!chunk.name || typeof chunk.name !== 'string') {
                            console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
                            continue;
                        }

                        if (!chunk.callId || typeof chunk.callId !== 'string') {
                            console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
                            continue;
                        }

                        // Ensure input is a valid object
                        if (!chunk.input || typeof chunk.input !== 'object') {
                            console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
                            continue;
                        }

                        // Convert tool calls to text format with proper error handling
                        const toolCall = {
                            type: "tool_call",
                            name: chunk.name,
                            arguments: chunk.input,
                            callId: chunk.callId
                        };

                        const toolCallText = JSON.stringify(toolCall);
                        accumulatedText += toolCallText;

                        // Log tool call for debugging
                        console.debug('Cline <Language Model API>: Processing tool call:', {
                            name: chunk.name,
                            callId: chunk.callId,
                            inputSize: JSON.stringify(chunk.input).length
                        });

                        yield {
                            type: "text",
                            text: toolCallText,
                        };
                    } catch (error) {
                        console.error('Cline <Language Model API>: Failed to process tool call:', error);
                        // Continue processing other chunks even if one fails
                        continue;
                    }
                } else {
                    console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
                }
            }

            // Count tokens in the accumulated text after stream completion
            const totalOutputTokens: number = await this.countTokens(accumulatedText);

            // Report final usage after stream completion
            yield {
                type: "usage",
                inputTokens: totalInputTokens,
                outputTokens: totalOutputTokens,
                totalCost: calculateApiCost(
                    this.getModel().info,
                    totalInputTokens,
                    totalOutputTokens
                )
            };
        }
        catch (error: unknown) {
            this.ensureCleanState();

            if (error instanceof vscode.CancellationError) {
                throw new Error("Cline <Language Model API>: Request cancelled by user");
            }

            if (error instanceof Error) {
                console.error('Cline <Language Model API>: Stream error details:', {
                    message: error.message,
                    stack: error.stack,
                    name: error.name
                });

                // Return original error if it's already an Error instance
                throw error;
            } else if (typeof error === 'object' && error !== null) {
                // Handle error-like objects
                const errorDetails = JSON.stringify(error, null, 2);
                console.error('Cline <Language Model API>: Stream error object:', errorDetails);
                throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
            } else {
                // Fallback for unknown error types
                const errorMessage = String(error);
                console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
                throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
            }
        }
    }

    // Return model information based on the current client state
    getModel(): { id: string; info: ModelInfo; } {
        if (this.client) {
            // Validate client properties
            const requiredProps = {
                id: this.client.id,
                vendor: this.client.vendor,
                family: this.client.family,
                version: this.client.version,
                maxInputTokens: this.client.maxInputTokens
            };

            // Log any missing properties for debugging
            for (const [prop, value] of Object.entries(requiredProps)) {
                if (!value && value !== 0) {
                    console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
                }
            }

            // Construct model ID using available information
            const modelParts = [
                this.client.vendor,
                this.client.family,
                this.client.version
            ].filter(Boolean);

            const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);

            // Build model info with conservative defaults for missing values
            const modelInfo: ModelInfo = {
                maxTokens: -1, // Unlimited tokens by default
                contextWindow: typeof this.client.maxInputTokens === 'number'
                    ? Math.max(0, this.client.maxInputTokens)
                    : openAiModelInfoSaneDefaults.contextWindow,
                supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
                supportsPromptCache: true,
                inputPrice: 0,
                outputPrice: 0,
                description: `VSCode Language Model: ${modelId}`
            };

            return { id: modelId, info: modelInfo };
        }

        // Fallback when no client is available
        const fallbackId = this.options.vsCodeLmModelSelector
            ? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
            : "vscode-lm";

        console.debug('Cline <Language Model API>: No client available, using fallback model info');

        return {
            id: fallbackId,
            info: {
                ...openAiModelInfoSaneDefaults,
                description: `VSCode Language Model (Fallback): ${fallbackId}`
            }
        };
    }

    async completePrompt(prompt: string): Promise<string> {
        try {
            const client = await this.getClient();
            const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
            let result = "";
            for await (const chunk of response.stream) {
                if (chunk instanceof vscode.LanguageModelTextPart) {
                    result += chunk.value;
                }
            }
            return result;
        } catch (error) {
            if (error instanceof Error) {
                throw new Error(`VSCode LM completion error: ${error.message}`)
            }
            throw error
        }
    }
}
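A minimal consumption sketch for the handler above, assuming the ApiStream chunk shapes yielded by createMessage (text chunks followed by one usage chunk); the selector values are illustrative:

import { VsCodeLmHandler } from './vscode-lm';

async function demo(): Promise<void> {
    // Any installed chat model matching the selector can be picked.
    const handler = new VsCodeLmHandler({
        vsCodeLmModelSelector: { vendor: 'copilot', family: 'gpt-4o' },
    });

    for await (const chunk of handler.createMessage('You are a helpful assistant', [
        { role: 'user', content: 'Hello!' },
    ])) {
        if (chunk.type === 'text') {
            process.stdout.write(chunk.text); // streamed text, including stringified tool calls
        } else if (chunk.type === 'usage') {
            console.log(`\ntokens in=${chunk.inputTokens} out=${chunk.outputTokens}`);
        }
    }

    handler.dispose(); // release the config listener and any pending cancellation
}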
src/api/transform/__tests__/vscode-lm-format.test.ts (new file, 246 lines)
@@ -0,0 +1,246 @@
|
|||||||
|
import { Anthropic } from "@anthropic-ai/sdk";
|
||||||
|
import * as vscode from 'vscode';
|
||||||
|
import { convertToVsCodeLmMessages, convertToAnthropicRole, convertToAnthropicMessage } from '../vscode-lm-format';
|
||||||
|
|
||||||
|
// Mock crypto
|
||||||
|
const mockCrypto = {
|
||||||
|
randomUUID: () => 'test-uuid'
|
||||||
|
};
|
||||||
|
global.crypto = mockCrypto as any;
|
||||||
|
|
||||||
|
// Define types for our mocked classes
|
||||||
|
interface MockLanguageModelTextPart {
|
||||||
|
type: 'text';
|
||||||
|
value: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface MockLanguageModelToolCallPart {
|
||||||
|
type: 'tool_call';
|
||||||
|
callId: string;
|
||||||
|
name: string;
|
||||||
|
input: any;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface MockLanguageModelToolResultPart {
|
||||||
|
type: 'tool_result';
|
||||||
|
toolUseId: string;
|
||||||
|
parts: MockLanguageModelTextPart[];
|
||||||
|
}
|
||||||
|
|
||||||
|
type MockMessageContent = MockLanguageModelTextPart | MockLanguageModelToolCallPart | MockLanguageModelToolResultPart;
|
||||||
|
|
||||||
|
interface MockLanguageModelChatMessage {
|
||||||
|
role: string;
|
||||||
|
name?: string;
|
||||||
|
content: MockMessageContent[];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mock vscode namespace
|
||||||
|
jest.mock('vscode', () => {
|
||||||
|
const LanguageModelChatMessageRole = {
|
||||||
|
Assistant: 'assistant',
|
||||||
|
User: 'user'
|
||||||
|
};
|
||||||
|
|
||||||
|
class MockLanguageModelTextPart {
|
||||||
|
type = 'text';
|
||||||
|
constructor(public value: string) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
class MockLanguageModelToolCallPart {
|
||||||
|
type = 'tool_call';
|
||||||
|
constructor(
|
||||||
|
public callId: string,
|
||||||
|
public name: string,
|
||||||
|
public input: any
|
||||||
|
) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
class MockLanguageModelToolResultPart {
|
||||||
|
type = 'tool_result';
|
||||||
|
constructor(
|
||||||
|
public toolUseId: string,
|
||||||
|
public parts: MockLanguageModelTextPart[]
|
||||||
|
) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
LanguageModelChatMessage: {
|
||||||
|
Assistant: jest.fn((content) => ({
|
||||||
|
role: LanguageModelChatMessageRole.Assistant,
|
||||||
|
name: 'assistant',
|
||||||
|
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
|
||||||
|
})),
|
||||||
|
User: jest.fn((content) => ({
|
||||||
|
role: LanguageModelChatMessageRole.User,
|
||||||
|
name: 'user',
|
||||||
|
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
|
||||||
|
}))
|
||||||
|
},
|
||||||
|
LanguageModelChatMessageRole,
|
||||||
|
LanguageModelTextPart: MockLanguageModelTextPart,
|
||||||
|
LanguageModelToolCallPart: MockLanguageModelToolCallPart,
|
||||||
|
LanguageModelToolResultPart: MockLanguageModelToolResultPart
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('vscode-lm-format', () => {
|
||||||
|
describe('convertToVsCodeLmMessages', () => {
|
||||||
|
it('should convert simple string messages', () => {
|
||||||
|
const messages: Anthropic.Messages.MessageParam[] = [
|
||||||
|
{ role: 'user', content: 'Hello' },
|
||||||
|
{ role: 'assistant', content: 'Hi there' }
|
||||||
|
];
|
||||||
|
|
||||||
|
const result = convertToVsCodeLmMessages(messages);
            expect(result).toHaveLength(2);
            expect(result[0].role).toBe('user');
            expect((result[0].content[0] as MockLanguageModelTextPart).value).toBe('Hello');
            expect(result[1].role).toBe('assistant');
            expect((result[1].content[0] as MockLanguageModelTextPart).value).toBe('Hi there');
        });

        it('should handle complex user messages with tool results', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user',
                content: [
                    { type: 'text', text: 'Here is the result:' },
                    {
                        type: 'tool_result',
                        tool_use_id: 'tool-1',
                        content: 'Tool output'
                    }
                ]
            }];

            const result = convertToVsCodeLmMessages(messages);

            expect(result).toHaveLength(1);
            expect(result[0].role).toBe('user');
            expect(result[0].content).toHaveLength(2);
            const [toolResult, textContent] = result[0].content as [MockLanguageModelToolResultPart, MockLanguageModelTextPart];
            expect(toolResult.type).toBe('tool_result');
            expect(textContent.type).toBe('text');
        });

        it('should handle complex assistant messages with tool calls', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'assistant',
                content: [
                    { type: 'text', text: 'Let me help you with that.' },
                    {
                        type: 'tool_use',
                        id: 'tool-1',
                        name: 'calculator',
                        input: { operation: 'add', numbers: [2, 2] }
                    }
                ]
            }];

            const result = convertToVsCodeLmMessages(messages);

            expect(result).toHaveLength(1);
            expect(result[0].role).toBe('assistant');
            expect(result[0].content).toHaveLength(2);
            const [toolCall, textContent] = result[0].content as [MockLanguageModelToolCallPart, MockLanguageModelTextPart];
            expect(toolCall.type).toBe('tool_call');
            expect(textContent.type).toBe('text');
        });

        it('should handle image blocks with appropriate placeholders', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user',
                content: [
                    { type: 'text', text: 'Look at this:' },
                    {
                        type: 'image',
                        source: {
                            type: 'base64',
                            media_type: 'image/png',
                            data: 'base64data'
                        }
                    }
                ]
            }];

            const result = convertToVsCodeLmMessages(messages);

            expect(result).toHaveLength(1);
            const imagePlaceholder = result[0].content[1] as MockLanguageModelTextPart;
            expect(imagePlaceholder.value).toContain('[Image (base64): image/png not supported by VSCode LM API]');
        });
    });

    describe('convertToAnthropicRole', () => {
        it('should convert assistant role correctly', () => {
            const result = convertToAnthropicRole('assistant' as any);
            expect(result).toBe('assistant');
        });

        it('should convert user role correctly', () => {
            const result = convertToAnthropicRole('user' as any);
            expect(result).toBe('user');
        });

        it('should return null for unknown roles', () => {
            const result = convertToAnthropicRole('unknown' as any);
            expect(result).toBeNull();
        });
    });

    describe('convertToAnthropicMessage', () => {
        it('should convert assistant message with text content', async () => {
            const vsCodeMessage = {
                role: 'assistant',
                name: 'assistant',
                content: [new vscode.LanguageModelTextPart('Hello')]
            };

            const result = await convertToAnthropicMessage(vsCodeMessage as any);

            expect(result.role).toBe('assistant');
            expect(result.content).toHaveLength(1);
            expect(result.content[0]).toEqual({
                type: 'text',
                text: 'Hello'
            });
            expect(result.id).toBe('test-uuid');
        });

        it('should convert assistant message with tool calls', async () => {
            const vsCodeMessage = {
                role: 'assistant',
                name: 'assistant',
                content: [new vscode.LanguageModelToolCallPart(
                    'call-1',
                    'calculator',
                    { operation: 'add', numbers: [2, 2] }
                )]
            };

            const result = await convertToAnthropicMessage(vsCodeMessage as any);

            expect(result.content).toHaveLength(1);
            expect(result.content[0]).toEqual({
                type: 'tool_use',
                id: 'call-1',
                name: 'calculator',
                input: { operation: 'add', numbers: [2, 2] }
            });
            expect(result.id).toBe('test-uuid');
        });

        it('should throw error for non-assistant messages', async () => {
            const vsCodeMessage = {
                role: 'user',
                name: 'user',
                content: [new vscode.LanguageModelTextPart('Hello')]
            };

            await expect(convertToAnthropicMessage(vsCodeMessage as any))
                .rejects
                .toThrow('Cline <Language Model API>: Only assistant messages are supported.');
        });
    });
});
209
src/api/transform/vscode-lm-format.ts
Normal file
@@ -0,0 +1,209 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';

/**
 * Safely converts a value into a plain object.
 */
function asObjectSafe(value: any): object {
    // Handle null/undefined
    if (!value) {
        return {};
    }

    try {
        // Handle strings that might be JSON
        if (typeof value === 'string') {
            return JSON.parse(value);
        }

        // Handle pre-existing objects
        if (typeof value === 'object') {
            return Object.assign({}, value);
        }

        return {};
    }
    catch (error) {
        console.warn('Cline <Language Model API>: Failed to parse object:', error);
        return {};
    }
}

export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
    const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];

    for (const anthropicMessage of anthropicMessages) {
        // Handle simple string messages
        if (typeof anthropicMessage.content === "string") {
            vsCodeLmMessages.push(
                anthropicMessage.role === "assistant"
                    ? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
                    : vscode.LanguageModelChatMessage.User(anthropicMessage.content)
            );
            continue;
        }

        // Handle complex message structures
        switch (anthropicMessage.role) {
            case "user": {
                const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
                    nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
                    toolMessages: Anthropic.ToolResultBlockParam[];
                }>(
                    (acc, part) => {
                        if (part.type === "tool_result") {
                            acc.toolMessages.push(part);
                        }
                        else if (part.type === "text" || part.type === "image") {
                            acc.nonToolMessages.push(part);
                        }
                        return acc;
                    },
                    { nonToolMessages: [], toolMessages: [] },
                );

                // Process tool messages first, then non-tool messages
                const contentParts = [
                    // Convert tool messages to ToolResultParts
                    ...toolMessages.map((toolMessage) => {
                        // Process tool result content into TextParts
                        const toolContentParts: vscode.LanguageModelTextPart[] = (
                            typeof toolMessage.content === "string"
                                ? [new vscode.LanguageModelTextPart(toolMessage.content)]
                                : (
                                    toolMessage.content?.map((part) => {
                                        if (part.type === "image") {
                                            return new vscode.LanguageModelTextPart(
                                                `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
                                            );
                                        }
                                        return new vscode.LanguageModelTextPart(part.text);
                                    })
                                    ?? [new vscode.LanguageModelTextPart("")]
                                )
                        );

                        return new vscode.LanguageModelToolResultPart(
                            toolMessage.tool_use_id,
                            toolContentParts
                        );
                    }),

                    // Convert non-tool messages to TextParts after tool messages
                    ...nonToolMessages.map((part) => {
                        if (part.type === "image") {
                            return new vscode.LanguageModelTextPart(
                                `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
                            );
                        }
                        return new vscode.LanguageModelTextPart(part.text);
                    })
                ];

                // Add single user message with all content parts
                vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
                break;
            }

            case "assistant": {
                const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
                    nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
                    toolMessages: Anthropic.ToolUseBlockParam[];
                }>(
                    (acc, part) => {
                        if (part.type === "tool_use") {
                            acc.toolMessages.push(part);
                        }
                        else if (part.type === "text" || part.type === "image") {
                            acc.nonToolMessages.push(part);
                        }
                        return acc;
                    },
                    { nonToolMessages: [], toolMessages: [] },
                );

                // Process tool messages first, then non-tool messages
                const contentParts = [
                    // Convert tool messages to ToolCallParts first
                    ...toolMessages.map((toolMessage) =>
                        new vscode.LanguageModelToolCallPart(
                            toolMessage.id,
                            toolMessage.name,
                            asObjectSafe(toolMessage.input)
                        )
                    ),

                    // Convert non-tool messages to TextParts after tool messages
                    ...nonToolMessages.map((part) => {
                        if (part.type === "image") {
                            return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
                        }
                        return new vscode.LanguageModelTextPart(part.text);
                    })
                ];

                // Add the assistant message to the list of messages
                vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
                break;
            }
        }
    }

    return vsCodeLmMessages;
}

export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
    switch (vsCodeLmMessageRole) {
        case vscode.LanguageModelChatMessageRole.Assistant:
            return "assistant";
        case vscode.LanguageModelChatMessageRole.User:
            return "user";
        default:
            return null;
    }
}

export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
    const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
    if (anthropicRole !== "assistant") {
        throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
    }

    return {
        id: crypto.randomUUID(),
        type: "message",
        model: "vscode-lm",
        role: anthropicRole,
        content: (
            vsCodeLmMessage.content
                .map((part): Anthropic.ContentBlock | null => {
                    if (part instanceof vscode.LanguageModelTextPart) {
                        return {
                            type: "text",
                            text: part.value
                        };
                    }

                    if (part instanceof vscode.LanguageModelToolCallPart) {
                        return {
                            type: "tool_use",
                            id: part.callId || crypto.randomUUID(),
                            name: part.name,
                            input: asObjectSafe(part.input)
                        };
                    }

                    return null;
                })
                .filter(
                    (part): part is Anthropic.ContentBlock => part !== null
                )
        ),
        stop_reason: null,
        stop_sequence: null,
        usage: {
            input_tokens: 0,
            output_tokens: 0,
        }
    };
}
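To make the conversion rules above concrete, here is a minimal usage sketch (illustrative only, not part of the diff); it assumes the `vscode` module resolves against the real VS Code API at runtime:

```ts
import { Anthropic } from "@anthropic-ai/sdk";
import { convertToVsCodeLmMessages } from "./vscode-lm-format";

// A user turn that mixes text with a tool result, followed by a plain assistant turn.
const history: Anthropic.Messages.MessageParam[] = [
	{
		role: "user",
		content: [
			{ type: "text", text: "What is 2 + 2?" },
			{ type: "tool_result", tool_use_id: "tool-1", content: "4" },
		],
	},
	{ role: "assistant", content: "2 + 2 equals 4." },
]

// Per the converter: tool results are emitted before the free-form text within
// the same user message, and the string-only assistant turn becomes a single
// Assistant message, so two LM messages come out.
const lmMessages = convertToVsCodeLmMessages(history)
console.log(lmMessages.length) // 2
```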
@@ -93,6 +93,7 @@ type GlobalStateKey =
 	| "requestDelaySeconds"
 	| "currentApiConfigName"
 	| "listApiConfigMeta"
+	| "vsCodeLmModelSelector"
 	| "mode"
 	| "modeApiConfigs"
 	| "customPrompts"
@@ -571,8 +572,12 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 					const lmStudioModels = await this.getLmStudioModels(message.text)
 					this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
 					break
+				case "requestVsCodeLmModels":
+					const vsCodeLmModels = await this.getVsCodeLmModels()
+					this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
+					break
 				case "refreshGlamaModels":
 					await this.refreshGlamaModels()
 					break
 				case "refreshOpenRouterModels":
 					await this.refreshOpenRouterModels()
@@ -1109,6 +1114,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			openRouterModelId,
 			openRouterModelInfo,
 			openRouterUseMiddleOutTransform,
+			vsCodeLmModelSelector,
 		} = apiConfiguration
 		await this.updateGlobalState("apiProvider", apiProvider)
 		await this.updateGlobalState("apiModelId", apiModelId)
@@ -1140,6 +1146,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 		await this.updateGlobalState("openRouterModelId", openRouterModelId)
 		await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
 		await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
+		await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
 		if (this.cline) {
 			this.cline.api = buildApiHandler(apiConfiguration)
 		}
@@ -1210,6 +1217,17 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 		}
 	}
+
+	// VSCode LM API
+	private async getVsCodeLmModels() {
+		try {
+			const models = await vscode.lm.selectChatModels({});
+			return models || [];
+		} catch (error) {
+			console.error('Error fetching VS Code LM models:', error);
+			return [];
+		}
+	}
+
 	// OpenAi

 	async getOpenAiModels(baseUrl?: string, apiKey?: string) {
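Worth noting: `vscode.lm.selectChatModels` also accepts a selector, so a narrower variant of this helper is possible. A hypothetical sketch (the `getCopilotModels` name and the vendor filter are assumptions, not part of this commit):

```ts
import * as vscode from "vscode";

// Hypothetical variant: narrow the model list with a selector instead of
// fetching everything. An empty selector ({}) matches all available models.
async function getCopilotModels(): Promise<vscode.LanguageModelChat[]> {
	try {
		return await vscode.lm.selectChatModels({ vendor: "copilot" })
	} catch (error) {
		console.error("Error fetching VS Code LM models:", error)
		return []
	}
}
```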
@@ -1268,6 +1286,33 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 		return cacheDir
 	}
+
+	async handleGlamaCallback(code: string) {
+		let apiKey: string
+		try {
+			const response = await axios.post("https://glama.ai/api/gateway/v1/auth/exchange-code", { code })
+			if (response.data && response.data.apiKey) {
+				apiKey = response.data.apiKey
+			} else {
+				throw new Error("Invalid response from Glama API")
+			}
+		} catch (error) {
+			console.error("Error exchanging code for API key:", error)
+			throw error
+		}
+
+		const glama: ApiProvider = "glama"
+		await this.updateGlobalState("apiProvider", glama)
+		await this.storeSecret("glamaApiKey", apiKey)
+		await this.postStateToWebview()
+		if (this.cline) {
+			this.cline.api = buildApiHandler({
+				apiProvider: glama,
+				glamaApiKey: apiKey,
+			})
+		}
+		// await this.postMessageToWebview({ type: "action", action: "settingsButtonClicked" }) // bad ux if user is on welcome
+	}
+
 	async readGlamaModels(): Promise<Record<string, ModelInfo> | undefined> {
 		const glamaModelsFilePath = path.join(
 			await this.ensureCacheDirectoryExists(),
@@ -1742,6 +1787,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			requestDelaySeconds,
 			currentApiConfigName,
 			listApiConfigMeta,
+			vsCodeLmModelSelector,
 			mode,
 			modeApiConfigs,
 			customPrompts,
@@ -1800,6 +1846,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			this.getGlobalState("requestDelaySeconds") as Promise<number | undefined>,
 			this.getGlobalState("currentApiConfigName") as Promise<string | undefined>,
 			this.getGlobalState("listApiConfigMeta") as Promise<ApiConfigMeta[] | undefined>,
+			this.getGlobalState("vsCodeLmModelSelector") as Promise<vscode.LanguageModelChatSelector | undefined>,
 			this.getGlobalState("mode") as Promise<Mode | undefined>,
 			this.getGlobalState("modeApiConfigs") as Promise<Record<Mode, string> | undefined>,
 			this.getGlobalState("customPrompts") as Promise<CustomPrompts | undefined>,
@@ -1852,6 +1899,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 				openRouterModelId,
 				openRouterModelInfo,
 				openRouterUseMiddleOutTransform,
+				vsCodeLmModelSelector,
 			},
 			lastShownAnnouncementId,
 			customInstructions,
@@ -7,7 +7,7 @@ The Cline extension exposes an API that can be used by other extensions. To use
 3. Get access to the API with the following code:

 ```ts
-const clineExtension = vscode.extensions.getExtension<ClineAPI>("saoudrizwan.claude-dev")
+const clineExtension = vscode.extensions.getExtension<ClineAPI>("rooveterinaryinc.roo-cline")

 if (!clineExtension?.isActive) {
 	throw new Error("Cline extension is not activated")
@@ -44,11 +44,11 @@ The Cline extension exposes an API that can be used by other extensions. To use
 }
 ```

-**Note:** To ensure that the `saoudrizwan.claude-dev` extension is activated before your extension, add it to the `extensionDependencies` in your `package.json`:
+**Note:** To ensure that the `rooveterinaryinc.roo-cline` extension is activated before your extension, add it to the `extensionDependencies` in your `package.json`:

 ```json
 "extensionDependencies": [
-	"saoudrizwan.claude-dev"
+	"rooveterinaryinc.roo-cline"
 ]
 ```
@@ -139,6 +139,14 @@ export function activate(context: vscode.ExtensionContext) {
 				return
 			}
 			switch (path) {
+				case "/glama": {
+					const code = query.get("code")
+					if (code) {
+						await visibleProvider.handleGlamaCallback(code)
+					}
+					break
+				}
 				case "/openrouter": {
 					const code = query.get("code")
 					if (code) {
@@ -141,5 +141,5 @@ export function mergeJson(
 }

 function getExtensionUri(): vscode.Uri {
-	return vscode.extensions.getExtension("saoudrizwan.claude-dev")!.extensionUri
+	return vscode.extensions.getExtension("rooveterinaryinc.roo-cline")!.extensionUri
 }
@@ -25,6 +25,9 @@ export interface ExtensionMessage {
 		| "enhancedPrompt"
 		| "commitSearchResults"
 		| "listApiConfig"
+		| "vsCodeLmModels"
+		| "vsCodeLmApiAvailable"
+		| "requestVsCodeLmModels"
 		| "updatePrompt"
 		| "systemPrompt"
 	text?: string
@@ -40,6 +43,7 @@ export interface ExtensionMessage {
 	images?: string[]
 	ollamaModels?: string[]
 	lmStudioModels?: string[]
+	vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
 	filePaths?: string[]
 	partialMessage?: ClineMessage
 	glamaModels?: Record<string, ModelInfo>
@@ -61,9 +61,11 @@ export interface WebviewMessage {
 		| "terminalOutputLineLimit"
 		| "mcpEnabled"
 		| "searchCommits"
+		| "refreshGlamaModels"
 		| "alwaysApproveResubmit"
 		| "requestDelaySeconds"
 		| "setApiConfigPassword"
+		| "requestVsCodeLmModels"
 		| "mode"
 		| "updatePrompt"
 		| "updateEnhancedPrompt"
56
src/shared/__tests__/checkExistApiConfig.test.ts
Normal file
@@ -0,0 +1,56 @@
import { checkExistKey } from '../checkExistApiConfig';
import { ApiConfiguration } from '../api';

describe('checkExistKey', () => {
    it('should return false for undefined config', () => {
        expect(checkExistKey(undefined)).toBe(false);
    });

    it('should return false for empty config', () => {
        const config: ApiConfiguration = {};
        expect(checkExistKey(config)).toBe(false);
    });

    it('should return true when one key is defined', () => {
        const config: ApiConfiguration = {
            apiKey: 'test-key'
        };
        expect(checkExistKey(config)).toBe(true);
    });

    it('should return true when multiple keys are defined', () => {
        const config: ApiConfiguration = {
            apiKey: 'test-key',
            glamaApiKey: 'glama-key',
            openRouterApiKey: 'openrouter-key'
        };
        expect(checkExistKey(config)).toBe(true);
    });

    it('should return true when only non-key fields are undefined', () => {
        const config: ApiConfiguration = {
            apiKey: 'test-key',
            apiProvider: undefined,
            anthropicBaseUrl: undefined
        };
        expect(checkExistKey(config)).toBe(true);
    });

    it('should return false when all key fields are undefined', () => {
        const config: ApiConfiguration = {
            apiKey: undefined,
            glamaApiKey: undefined,
            openRouterApiKey: undefined,
            awsRegion: undefined,
            vertexProjectId: undefined,
            openAiApiKey: undefined,
            ollamaModelId: undefined,
            lmStudioModelId: undefined,
            geminiApiKey: undefined,
            openAiNativeApiKey: undefined,
            deepSeekApiKey: undefined,
            vsCodeLmModelSelector: undefined
        };
        expect(checkExistKey(config)).toBe(false);
    });
});
44
src/shared/__tests__/vsCodeSelectorUtils.test.ts
Normal file
@@ -0,0 +1,44 @@
import { stringifyVsCodeLmModelSelector, SELECTOR_SEPARATOR } from '../vsCodeSelectorUtils';
import { LanguageModelChatSelector } from 'vscode';

describe('vsCodeSelectorUtils', () => {
    describe('stringifyVsCodeLmModelSelector', () => {
        it('should join all defined selector properties with separator', () => {
            const selector: LanguageModelChatSelector = {
                vendor: 'test-vendor',
                family: 'test-family',
                version: 'v1',
                id: 'test-id'
            };

            const result = stringifyVsCodeLmModelSelector(selector);
            expect(result).toBe('test-vendor/test-family/v1/test-id');
        });

        it('should skip undefined properties', () => {
            const selector: LanguageModelChatSelector = {
                vendor: 'test-vendor',
                family: 'test-family'
            };

            const result = stringifyVsCodeLmModelSelector(selector);
            expect(result).toBe('test-vendor/test-family');
        });

        it('should handle empty selector', () => {
            const selector: LanguageModelChatSelector = {};

            const result = stringifyVsCodeLmModelSelector(selector);
            expect(result).toBe('');
        });

        it('should handle selector with only one property', () => {
            const selector: LanguageModelChatSelector = {
                vendor: 'test-vendor'
            };

            const result = stringifyVsCodeLmModelSelector(selector);
            expect(result).toBe('test-vendor');
        });
    });
});
@@ -1,3 +1,5 @@
+import * as vscode from 'vscode';
+
 export type ApiProvider =
 	| "anthropic"
 	| "glama"
@@ -10,11 +12,13 @@ export type ApiProvider =
 	| "gemini"
 	| "openai-native"
 	| "deepseek"
+	| "vscode-lm"

 export interface ApiHandlerOptions {
 	apiModelId?: string
 	apiKey?: string // anthropic
 	anthropicBaseUrl?: string
+	vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
 	glamaModelId?: string
 	glamaModelInfo?: ModelInfo
 	glamaApiKey?: string
@@ -58,7 +62,7 @@ export type ApiConfiguration = ApiHandlerOptions & {

 export interface ModelInfo {
 	maxTokens?: number
-	contextWindow?: number
+	contextWindow: number
 	supportsImages?: boolean
 	supportsComputerUse?: boolean
 	supportsPromptCache: boolean // this value is hardcoded for now
@@ -13,7 +13,8 @@ export function checkExistKey(config: ApiConfiguration | undefined) {
 				config.lmStudioModelId,
 				config.geminiApiKey,
 				config.openAiNativeApiKey,
-				config.deepSeekApiKey
+				config.deepSeekApiKey,
+				config.vsCodeLmModelSelector,
 			].some((key) => key !== undefined)
 			: false;
 }
14
src/shared/vsCodeSelectorUtils.ts
Normal file
@@ -0,0 +1,14 @@
import { LanguageModelChatSelector } from 'vscode';

export const SELECTOR_SEPARATOR = '/';

export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
    return [
        selector.vendor,
        selector.family,
        selector.version,
        selector.id
    ]
        .filter(Boolean)
        .join(SELECTOR_SEPARATOR);
}
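A quick usage sketch of this helper (illustrative only): it collapses a selector into a stable display/config key, dropping unset fields.

```ts
import { stringifyVsCodeLmModelSelector } from "./vsCodeSelectorUtils";

// Only defined fields are joined, in vendor/family/version/id order.
stringifyVsCodeLmModelSelector({ vendor: "copilot", family: "gpt-4o" }) // "copilot/gpt-4o"
stringifyVsCodeLmModelSelector({})                                      // ""
```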
@@ -4,7 +4,7 @@ import {
 	VSCodeLink,
 	VSCodeRadio,
 	VSCodeRadioGroup,
-	VSCodeTextField,
+	VSCodeTextField
 } from "@vscode/webview-ui-toolkit/react"
 import { Fragment, memo, useCallback, useEffect, useMemo, useState } from "react"
 import { useEvent, useInterval } from "react-use"
@@ -33,6 +33,7 @@ import {
 import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
 import { useExtensionState } from "../../context/ExtensionStateContext"
 import { vscode } from "../../utils/vscode"
+import * as vscodemodels from "vscode"
 import VSCodeButtonLink from "../common/VSCodeButtonLink"
 import OpenRouterModelPicker, {
 	ModelDescriptionMarkdown,
@@ -50,6 +51,7 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
 	const { apiConfiguration, setApiConfiguration, uriScheme, onUpdateApiConfig } = useExtensionState()
 	const [ollamaModels, setOllamaModels] = useState<string[]>([])
 	const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
+	const [vsCodeLmModels, setVsCodeLmModels] = useState<vscodemodels.LanguageModelChatSelector[]>([])
 	const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
 	const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
 	const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
@@ -70,21 +72,24 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
 			vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
 		} else if (selectedProvider === "lmstudio") {
 			vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
+		} else if (selectedProvider === "vscode-lm") {
+			vscode.postMessage({ type: "requestVsCodeLmModels" })
 		}
 	}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
 	useEffect(() => {
-		if (selectedProvider === "ollama" || selectedProvider === "lmstudio") {
+		if (selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm") {
 			requestLocalModels()
 		}
 	}, [selectedProvider, requestLocalModels])
-	useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null)
+	useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm" ? 2000 : null)

 	const handleMessage = useCallback((event: MessageEvent) => {
 		const message: ExtensionMessage = event.data
 		if (message.type === "ollamaModels" && message.ollamaModels) {
 			setOllamaModels(message.ollamaModels)
 		} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
 			setLmStudioModels(message.lmStudioModels)
+		} else if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
+			setVsCodeLmModels(message.vsCodeLmModels)
 		}
 	}, [])
 	useEvent("message", handleMessage)
@@ -139,6 +144,7 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
 					{ value: "vertex", label: "GCP Vertex AI" },
 					{ value: "bedrock", label: "AWS Bedrock" },
 					{ value: "glama", label: "Glama" },
+					{ value: "vscode-lm", label: "VS Code LM API" },
 					{ value: "lmstudio", label: "LM Studio" },
 					{ value: "ollama", label: "Ollama" }
 				]}
@@ -206,11 +212,12 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
 						<span style={{ fontWeight: 500 }}>Glama API Key</span>
 					</VSCodeTextField>
 					{!apiConfiguration?.glamaApiKey && (
-						<VSCodeLink
-							href="https://glama.ai/settings/api-keys"
-							style={{ display: "inline", fontSize: "inherit" }}>
-							You can get an Glama API key by signing up here.
-						</VSCodeLink>
+						<VSCodeButtonLink
+							href={getGlamaAuthUrl(uriScheme)}
+							style={{ margin: "5px 0 0 0" }}
+							appearance="secondary">
+							Get Glama API Key
+						</VSCodeButtonLink>
 					)}
 					<p
 						style={{
@@ -619,6 +626,59 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
 				</div>
 			)}

+			{selectedProvider === "vscode-lm" && (
+				<div>
+					<div className="dropdown-container">
+						<label htmlFor="vscode-lm-model">
+							<span style={{ fontWeight: 500 }}>Language Model</span>
+						</label>
+						{vsCodeLmModels.length > 0 ? (
+							<Dropdown
+								id="vscode-lm-model"
+								value={apiConfiguration?.vsCodeLmModelSelector ?
+									`${apiConfiguration.vsCodeLmModelSelector.vendor ?? ""}/${apiConfiguration.vsCodeLmModelSelector.family ?? ""}` :
+									""}
+								onChange={(value: unknown) => {
+									const valueStr = (value as DropdownOption).value;
+									const [vendor, family] = valueStr.split('/');
+									setApiConfiguration({
+										...apiConfiguration,
+										vsCodeLmModelSelector: valueStr ? { vendor, family } : undefined
+									});
+								}}
+								style={{ width: "100%" }}
+								options={[
+									{ value: "", label: "Select a model..." },
+									...vsCodeLmModels.map((model) => ({
+										value: `${model.vendor}/${model.family}`,
+										label: `${model.vendor} - ${model.family}`
+									}))
+								]}
+							/>
+						) : (
+							<p style={{
+								fontSize: "12px",
+								marginTop: "5px",
+								color: "var(--vscode-descriptionForeground)",
+							}}>
+								The VS Code Language Model API allows you to run models provided by other VS Code extensions (including but not limited to GitHub Copilot).
+								The easiest way to get started is to install the Copilot and Copilot Chat extensions from the VS Code Marketplace.
+							</p>
+						)}
+
+						<p
+							style={{
+								fontSize: "12px",
+								marginTop: "5px",
+								color: "var(--vscode-errorForeground)",
+								fontWeight: 500,
+							}}>
+							Note: This is a very experimental integration and may not work as expected. Please report any issues to the Roo-Cline GitHub repository.
+						</p>
+					</div>
+				</div>
+			)}
+
 			{selectedProvider === "ollama" && (
 				<div>
 					<VSCodeTextField
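To make the UI note above concrete, here is an illustrative sketch (not part of this commit) of how a selector chosen in this dropdown maps onto an actual VS Code LM API request; the `askSelectedModel` name is a hypothetical helper:

```ts
import * as vscode from "vscode";

// Illustrative only: resolve a selector to a model and stream one reply.
async function askSelectedModel(
	selector: vscode.LanguageModelChatSelector,
	prompt: string,
	token: vscode.CancellationToken,
): Promise<string> {
	// An empty selector would match any model; a { vendor, family } pair
	// (as stored in vsCodeLmModelSelector) narrows the match.
	const [model] = await vscode.lm.selectChatModels(selector)
	if (!model) {
		throw new Error("No matching VS Code language model is available.")
	}

	const response = await model.sendRequest(
		[vscode.LanguageModelChatMessage.User(prompt)],
		{},
		token,
	)

	// The response streams back as text fragments.
	let text = ""
	for await (const fragment of response.text) {
		text += fragment
	}
	return text
}
```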
@@ -739,8 +799,14 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
 	)
 }

+export function getGlamaAuthUrl(uriScheme?: string) {
+	const callbackUrl = `${uriScheme || "vscode"}://rooveterinaryinc.roo-cline/glama`
+
+	return `https://glama.ai/oauth/authorize?callback_url=${encodeURIComponent(callbackUrl)}`
+}
+
 export function getOpenRouterAuthUrl(uriScheme?: string) {
-	return `https://openrouter.ai/auth?callback_url=${uriScheme || "vscode"}://saoudrizwan.claude-dev/openrouter`
+	return `https://openrouter.ai/auth?callback_url=${uriScheme || "vscode"}://rooveterinaryinc.roo-cline/openrouter`
 }

 export const formatPrice = (price: number) => {
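For reference, a short note on what the new helper produces and how the callback round-trips (derived from the code in this diff):

```ts
// With the default "vscode" scheme, getGlamaAuthUrl() yields:
//   https://glama.ai/oauth/authorize?callback_url=vscode%3A%2F%2Frooveterinaryinc.roo-cline%2Fglama
// After authorization, VS Code routes the vscode://rooveterinaryinc.roo-cline/glama
// callback to the "/glama" case in the activate() URI handler shown earlier,
// which calls handleGlamaCallback(code) to exchange the code for an API key.
console.log(getGlamaAuthUrl())
```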
@@ -932,6 +998,17 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 				selectedModelId: apiConfiguration?.lmStudioModelId || "",
 				selectedModelInfo: openAiModelInfoSaneDefaults,
 			}
+		case "vscode-lm":
+			return {
+				selectedProvider: provider,
+				selectedModelId: apiConfiguration?.vsCodeLmModelSelector ?
+					`${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}` :
+					"",
+				selectedModelInfo: {
+					...openAiModelInfoSaneDefaults,
+					supportsImages: false, // VSCode LM API currently doesn't support images
+				},
+			}
 		default:
 			return getProviderData(anthropicModels, anthropicDefaultModelId)
 	}
@@ -57,6 +57,11 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s
 				return "You must provide a valid model ID."
 			}
 			break
+		case "vscode-lm":
+			if (!apiConfiguration.vsCodeLmModelSelector) {
+				return "You must provide a valid model selector."
+			}
+			break
 		}
 	}
 	return undefined