Merge remote-tracking branch 'origin/main' into new_unified

This commit is contained in:
Matt Rubens
2025-01-16 21:53:36 -05:00
40 changed files with 2911 additions and 254 deletions

View File

@@ -1,5 +0,0 @@
---
"roo-cline": patch
---
Improvements to fuzzy search in mentions, history, and model lists

View File

@@ -2,20 +2,23 @@ name: Changeset Release
run-name: Changeset Release ${{ github.actor != 'R00-B0T' && '- Create PR' || '- Update Changelog' }} run-name: Changeset Release ${{ github.actor != 'R00-B0T' && '- Create PR' || '- Update Changelog' }}
on: on:
workflow_dispatch:
pull_request: pull_request:
types: [closed, opened, labeled] types: [closed, opened, labeled]
env: env:
REPO_PATH: ${{ github.repository }} REPO_PATH: ${{ github.repository }}
GIT_REF: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || 'main' }}
jobs: jobs:
# Job 1: Create version bump PR when changesets are merged to main # Job 1: Create version bump PR when changesets are merged to main
changeset-pr-version-bump: changeset-pr-version-bump:
if: > if: >
github.event_name == 'pull_request' && ( github.event_name == 'pull_request' &&
github.event.pull_request.merged == true && github.event.pull_request.merged == true &&
github.event.pull_request.base.ref == 'main' && github.event.pull_request.base.ref == 'main' &&
github.actor != 'R00-B0T' github.actor != 'R00-B0T' ) ||
github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
contents: write contents: write
@@ -25,7 +28,7 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ env.GIT_REF }}
- name: Setup Node.js - name: Setup Node.js
uses: actions/setup-node@v4 uses: actions/setup-node@v4

View File

@@ -1,5 +1,22 @@
# Roo Cline Changelog # Roo Cline Changelog
## [3.1.4 - 3.1.5]
- Bug fixes to the auto approve menu
## [3.1.3]
- Add auto-approve chat bar (thanks Cline!)
- Fix bug with VS Code Language Models integration
## [3.1.2]
- Experimental support for VS Code Language Models including Copilot (thanks @RaySinner / @julesmons!)
- Fix bug related to configuration profile switching (thanks @samhvw8!)
- Improvements to fuzzy search in mentions, history, and model lists (thanks @samhvw8!)
- PKCE support for Glama (thanks @punkpeye!)
- Use 'developer' message for o1 system prompt
## [3.1.1] ## [3.1.1]
- Visual fixes to chat input and settings for the light+ themes - Visual fixes to chat input and settings for the light+ themes

View File

@@ -2,6 +2,8 @@
A fork of Cline, an autonomous coding agent, with some additional experimental features. Its been mainly writing itself recently, with a light touch of human guidance here and there. A fork of Cline, an autonomous coding agent, with some additional experimental features. Its been mainly writing itself recently, with a light touch of human guidance here and there.
You can track what's new at our [CHANGELOG](CHANGELOG.md), with some highlights below.
## New in 3.1: Chat Mode Prompt Customization & Prompt Enhancements ## New in 3.1: Chat Mode Prompt Customization & Prompt Enhancements
Hot off the heels of **v3.0** introducing Code, Architect, and Ask chat modes, one of the most requested features has arrived: **customizable prompts for each mode**! 🎉 Hot off the heels of **v3.0** introducing Code, Architect, and Ask chat modes, one of the most requested features has arrived: **customizable prompts for each mode**! 🎉
@@ -21,7 +23,7 @@ As always, wed love to hear your thoughts and ideas! What features do you wan
You can now choose between different prompts for Roo Cline to better suit your workflow. Heres whats available: You can now choose between different prompts for Roo Cline to better suit your workflow. Heres whats available:
- **Code:** (existing behavior): The default mode where Cline helps you write code and execute tasks. - **Code:** (existing behavior) The default mode where Cline helps you write code and execute tasks.
- **Architect:** "You are Cline, a software architecture expert..." Ideal for thinking through high-level technical design and system architecture. Cant write code or run commands. - **Architect:** "You are Cline, a software architecture expert..." Ideal for thinking through high-level technical design and system architecture. Cant write code or run commands.
@@ -37,37 +39,6 @@ Its super simple! Theres a dropdown in the bottom left of the chat input t
Right now, switching modes is a manual process. In the future, wed love to give Cline the ability to suggest mode switches based on context. For now, wed really appreciate your feedback on this feature. Right now, switching modes is a manual process. In the future, wed love to give Cline the ability to suggest mode switches based on context. For now, wed really appreciate your feedback on this feature.
Give it a try and let us know what you think in the reddit: https://www.reddit.com/r/roocline 🚀
## Experimental Features
- Different chat modes for coding, architecting code, and asking questions about the codebase
- Drag and drop images into chats
- Delete messages from chats
- @-mention Git commits to include their context in the chat
- Save different API configurations to quickly switch between providers and settings
- "Enhance prompt" button (OpenRouter models only for now)
- Sound effects for feedback
- Option to use browsers of different sizes and adjust screenshot quality
- Quick prompt copying from history
- Copy markdown from chat messages
- OpenRouter compression support
- Includes current time in the system prompt
- Uses a file system watcher to more reliably watch for file system changes
- Language selection for Cline's communication (English, Japanese, Spanish, French, German, and more)
- Support for DeepSeek V3
- Support for Amazon Nova and Meta 3, 3.1, and 3.2 models via AWS Bedrock
- Support for Glama
- Support for listing models from OpenAI-compatible providers
- Support for adding OpenAI-compatible models with or without streaming
- Per-tool MCP auto-approval
- Enable/disable individual MCP servers
- Enable/disable the MCP feature overall
- Automatically retry failed API requests with a configurable delay
- Configurable delay after auto-writes to allow diagnostics to detect potential problems
- Control the number of terminal output lines to pass to the model when executing commands
- Runs alongside the original Cline
## Disclaimer ## Disclaimer
**Please note** that Roo Veterinary, Inc does **not** make any representations or warranties regarding any code, models, or other tools provided or made available in connection with Roo-Cline, any associated third-party tools, or any resulting outputs. You assume **all risks** associated with the use of any such tools or outputs; such tools are provided on an **"AS IS"** and **"AS AVAILABLE"** basis. Such risks may include, without limitation, intellectual property infringement, cyber vulnerabilities or attacks, bias, inaccuracies, errors, defects, viruses, downtime, property loss or damage, and/or personal injury. You are solely responsible for your use of any such tools or outputs (including, without limitation, the legality, appropriateness, and results thereof). **Please note** that Roo Veterinary, Inc does **not** make any representations or warranties regarding any code, models, or other tools provided or made available in connection with Roo-Cline, any associated third-party tools, or any resulting outputs. You assume **all risks** associated with the use of any such tools or outputs; such tools are provided on an **"AS IS"** and **"AS AVAILABLE"** basis. Such risks may include, without limitation, intellectual property infringement, cyber vulnerabilities or attacks, bias, inaccuracies, errors, defects, viruses, downtime, property loss or damage, and/or personal injury. You are solely responsible for your use of any such tools or outputs (including, without limitation, the legality, appropriateness, and results thereof).
@@ -79,10 +50,9 @@ Here's an example of Roo-Cline autonomously creating a snake game with "Always a
https://github.com/user-attachments/assets/c2bb31dc-e9b2-4d73-885d-17f1471a4987 https://github.com/user-attachments/assets/c2bb31dc-e9b2-4d73-885d-17f1471a4987
## Contributing ## Contributing
To contribute to the project, start by exploring [open issues](https://github.com/RooVetGit/Roo-Cline/issues) or checking our [feature request board](https://github.com/cline/cline/discussions/categories/feature-requests?discussions_q=is%3Aopen+category%3A%22Feature+Requests%22+sort%3Atop). We'd also love to have you join the [Roo Cline Reddit](https://www.reddit.com/r/roocline/) and the [Cline Discord](https://discord.gg/cline) to share ideas and connect with other contributors. To contribute to the project, start by exploring [open issues](https://github.com/RooVetGit/Roo-Cline/issues) or checking our [feature request board](https://github.com/RooVetGit/Roo-Cline/discussions/categories/feature-requests). We'd also love to have you join the [Roo Cline Reddit](https://www.reddit.com/r/roocline/) to share ideas and connect with other contributors.
<details> ### Local Setup
<summary>Local Setup</summary>
1. Install dependencies: 1. Install dependencies:
```bash ```bash
@@ -107,10 +77,8 @@ To contribute to the project, start by exploring [open issues](https://github.co
5. Launch by pressing `F5` (or `Run`->`Start Debugging`) to open a new VSCode window with the extension loaded. (You may need to install the [esbuild problem matchers extension](https://marketplace.visualstudio.com/items?itemName=connor4312.esbuild-problem-matchers) if you run into issues building the project.) 5. Launch by pressing `F5` (or `Run`->`Start Debugging`) to open a new VSCode window with the extension loaded. (You may need to install the [esbuild problem matchers extension](https://marketplace.visualstudio.com/items?itemName=connor4312.esbuild-problem-matchers) if you run into issues building the project.)
</details> ### Publishing
<details>
<summary>Publishing</summary>
We use [changesets](https://github.com/changesets/changesets) for versioning and publishing this package. To make changes: We use [changesets](https://github.com/changesets/changesets) for versioning and publishing this package. To make changes:
1. Create a PR with your changes 1. Create a PR with your changes
@@ -125,11 +93,7 @@ Once your merge is successful:
- This PR will: - This PR will:
- Update the version based on your changeset - Update the version based on your changeset
- Update the `CHANGELOG.md` file - Update the `CHANGELOG.md` file
- Create a git tag - Once the PR is approved and merged, a new version will be published
- The PR will be automatically approved and merged
- A new version and git release will be published
</details>
--- ---

12
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{ {
"name": "roo-cline", "name": "roo-cline",
"version": "3.1.1", "version": "3.1.5",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "roo-cline", "name": "roo-cline",
"version": "3.1.1", "version": "3.1.5",
"dependencies": { "dependencies": {
"@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/bedrock-sdk": "^0.10.2",
"@anthropic-ai/sdk": "^0.26.0", "@anthropic-ai/sdk": "^0.26.0",
@@ -34,7 +34,7 @@
"isbinaryfile": "^5.0.2", "isbinaryfile": "^5.0.2",
"mammoth": "^1.8.0", "mammoth": "^1.8.0",
"monaco-vscode-textmate-theme-converter": "^0.1.7", "monaco-vscode-textmate-theme-converter": "^0.1.7",
"openai": "^4.73.1", "openai": "^4.78.1",
"os-name": "^6.0.0", "os-name": "^6.0.0",
"p-wait-for": "^5.0.2", "p-wait-for": "^5.0.2",
"pdf-parse": "^1.1.1", "pdf-parse": "^1.1.1",
@@ -12617,9 +12617,9 @@
} }
}, },
"node_modules/openai": { "node_modules/openai": {
"version": "4.76.0", "version": "4.78.1",
"resolved": "https://registry.npmjs.org/openai/-/openai-4.76.0.tgz", "resolved": "https://registry.npmjs.org/openai/-/openai-4.78.1.tgz",
"integrity": "sha512-QBGIetjX1C9xDp5XGa/3mPnfKI9BgAe2xHQX6PmO98wuW9qQaurBaumcYptQWc9LHZZq7cH/Y1Rjnsr6uUDdVw==", "integrity": "sha512-drt0lHZBd2lMyORckOXFPQTmnGLWSLt8VK0W9BhOKWpMFBEoHMoz5gxMPmVq5icp+sOrsbMnsmZTVHUlKvD1Ow==",
"dependencies": { "dependencies": {
"@types/node": "^18.11.18", "@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4", "@types/node-fetch": "^2.6.4",

View File

@@ -3,7 +3,7 @@
"displayName": "Roo Cline", "displayName": "Roo Cline",
"description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.", "description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.",
"publisher": "RooVeterinaryInc", "publisher": "RooVeterinaryInc",
"version": "3.1.1", "version": "3.1.5",
"icon": "assets/icons/rocket.png", "icon": "assets/icons/rocket.png",
"galleryBanner": { "galleryBanner": {
"color": "#617A91", "color": "#617A91",
@@ -42,7 +42,10 @@
"ai", "ai",
"llama" "llama"
], ],
"activationEvents": [], "activationEvents": [
"onLanguage",
"onStartupFinished"
],
"main": "./dist/extension.js", "main": "./dist/extension.js",
"contributes": { "contributes": {
"viewsContainers": { "viewsContainers": {
@@ -151,6 +154,20 @@
"git show" "git show"
], ],
"description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled" "description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
},
"roo-cline.vsCodeLmModelSelector": {
"type": "object",
"properties": {
"vendor": {
"type": "string",
"description": "The vendor of the language model (e.g. copilot)"
},
"family": {
"type": "string",
"description": "The family of the language model (e.g. gpt-4)"
}
},
"description": "Settings for VSCode Language Model API"
} }
} }
} }
@@ -232,7 +249,7 @@
"isbinaryfile": "^5.0.2", "isbinaryfile": "^5.0.2",
"mammoth": "^1.8.0", "mammoth": "^1.8.0",
"monaco-vscode-textmate-theme-converter": "^0.1.7", "monaco-vscode-textmate-theme-converter": "^0.1.7",
"openai": "^4.73.1", "openai": "^4.78.1",
"os-name": "^6.0.0", "os-name": "^6.0.0",
"p-wait-for": "^5.0.2", "p-wait-for": "^5.0.2",
"pdf-parse": "^1.1.1", "pdf-parse": "^1.1.1",

View File

@@ -11,6 +11,7 @@ import { LmStudioHandler } from "./providers/lmstudio"
import { GeminiHandler } from "./providers/gemini" import { GeminiHandler } from "./providers/gemini"
import { OpenAiNativeHandler } from "./providers/openai-native" import { OpenAiNativeHandler } from "./providers/openai-native"
import { DeepSeekHandler } from "./providers/deepseek" import { DeepSeekHandler } from "./providers/deepseek"
import { VsCodeLmHandler } from "./providers/vscode-lm"
import { ApiStream } from "./transform/stream" import { ApiStream } from "./transform/stream"
export interface SingleCompletionHandler { export interface SingleCompletionHandler {
@@ -47,6 +48,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
return new OpenAiNativeHandler(options) return new OpenAiNativeHandler(options)
case "deepseek": case "deepseek":
return new DeepSeekHandler(options) return new DeepSeekHandler(options)
case "vscode-lm":
return new VsCodeLmHandler(options)
default: default:
return new AnthropicHandler(options) return new AnthropicHandler(options)
} }

View File

@@ -60,6 +60,13 @@ jest.mock('openai', () => {
describe('OpenAiNativeHandler', () => { describe('OpenAiNativeHandler', () => {
let handler: OpenAiNativeHandler; let handler: OpenAiNativeHandler;
let mockOptions: ApiHandlerOptions; let mockOptions: ApiHandlerOptions;
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello!'
}
];
beforeEach(() => { beforeEach(() => {
mockOptions = { mockOptions = {
@@ -86,14 +93,6 @@ describe('OpenAiNativeHandler', () => {
}); });
describe('createMessage', () => { describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello!'
}
];
it('should handle streaming responses', async () => { it('should handle streaming responses', async () => {
const stream = handler.createMessage(systemPrompt, messages); const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = []; const chunks: any[] = [];
@@ -109,15 +108,126 @@ describe('OpenAiNativeHandler', () => {
it('should handle API errors', async () => { it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error')); mockCreate.mockRejectedValueOnce(new Error('API Error'));
const stream = handler.createMessage(systemPrompt, messages); const stream = handler.createMessage(systemPrompt, messages);
await expect(async () => { await expect(async () => {
for await (const chunk of stream) { for await (const chunk of stream) {
// Should not reach here // Should not reach here
} }
}).rejects.toThrow('API Error'); }).rejects.toThrow('API Error');
}); });
it('should handle missing content in response for o1 model', async () => {
// Use o1 model which supports developer role
handler = new OpenAiNativeHandler({
...mockOptions,
apiModelId: 'o1'
});
mockCreate.mockResolvedValueOnce({
choices: [{ message: { content: null } }],
usage: {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0
}
});
const generator = handler.createMessage(systemPrompt, messages);
const results = [];
for await (const result of generator) {
results.push(result);
}
expect(results).toEqual([
{ type: 'text', text: '' },
{ type: 'usage', inputTokens: 0, outputTokens: 0 }
]);
// Verify developer role is used for system prompt with o1 model
expect(mockCreate).toHaveBeenCalledWith({
model: 'o1',
messages: [
{ role: 'developer', content: systemPrompt },
{ role: 'user', content: 'Hello!' }
]
});
});
});
describe('streaming models', () => {
beforeEach(() => {
handler = new OpenAiNativeHandler({
...mockOptions,
apiModelId: 'gpt-4o',
});
});
it('should handle streaming response', async () => {
const mockStream = [
{ choices: [{ delta: { content: 'Hello' } }], usage: null },
{ choices: [{ delta: { content: ' there' } }], usage: null },
{ choices: [{ delta: { content: '!' } }], usage: { prompt_tokens: 10, completion_tokens: 5 } },
];
mockCreate.mockResolvedValueOnce(
(async function* () {
for (const chunk of mockStream) {
yield chunk;
}
})()
);
const generator = handler.createMessage(systemPrompt, messages);
const results = [];
for await (const result of generator) {
results.push(result);
}
expect(results).toEqual([
{ type: 'text', text: 'Hello' },
{ type: 'text', text: ' there' },
{ type: 'text', text: '!' },
{ type: 'usage', inputTokens: 10, outputTokens: 5 },
]);
expect(mockCreate).toHaveBeenCalledWith({
model: 'gpt-4o',
temperature: 0,
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: 'Hello!' },
],
stream: true,
stream_options: { include_usage: true },
});
});
it('should handle empty delta content', async () => {
const mockStream = [
{ choices: [{ delta: {} }], usage: null },
{ choices: [{ delta: { content: null } }], usage: null },
{ choices: [{ delta: { content: 'Hello' } }], usage: { prompt_tokens: 10, completion_tokens: 5 } },
];
mockCreate.mockResolvedValueOnce(
(async function* () {
for (const chunk of mockStream) {
yield chunk;
}
})()
);
const generator = handler.createMessage(systemPrompt, messages);
const results = [];
for await (const result of generator) {
results.push(result);
}
expect(results).toEqual([
{ type: 'text', text: 'Hello' },
{ type: 'usage', inputTokens: 10, outputTokens: 5 },
]);
});
}); });
describe('completePrompt', () => { describe('completePrompt', () => {
@@ -206,4 +316,4 @@ describe('OpenAiNativeHandler', () => {
expect(modelInfo.info).toBeDefined(); expect(modelInfo.info).toBeDefined();
}); });
}); });
}); });

View File

@@ -0,0 +1,289 @@
import * as vscode from 'vscode';
import { VsCodeLmHandler } from '../vscode-lm';
import { ApiHandlerOptions } from '../../../shared/api';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock the entire 'vscode' module. jest.mock factories are hoisted above the
// imports, so every class the factory needs must be declared inside it.
jest.mock('vscode', () => {
    // Stand-in for vscode.LanguageModelTextPart: a plain text chunk of a model response.
    class MockLanguageModelTextPart {
        type = 'text';
        constructor(public value: string) {}
    }

    // Stand-in for vscode.LanguageModelToolCallPart: a tool invocation emitted by the model.
    class MockLanguageModelToolCallPart {
        type = 'tool_call';
        constructor(
            public callId: string,
            public name: string,
            public input: any
        ) {}
    }

    return {
        workspace: {
            // Returns a disposable so the handler's config-change listener can be cleaned up.
            onDidChangeConfiguration: jest.fn((callback) => ({
                dispose: jest.fn()
            }))
        },
        CancellationTokenSource: jest.fn(() => ({
            token: {
                isCancellationRequested: false,
                onCancellationRequested: jest.fn()
            },
            cancel: jest.fn(),
            dispose: jest.fn()
        })),
        // Mirrors vscode.CancellationError so instanceof checks behave as in the real API.
        CancellationError: class CancellationError extends Error {
            constructor() {
                super('Operation cancelled');
                this.name = 'CancellationError';
            }
        },
        LanguageModelChatMessage: {
            // Factory helpers: bare strings are wrapped in a text part, arrays pass through.
            Assistant: jest.fn((content) => ({
                role: 'assistant',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            })),
            User: jest.fn((content) => ({
                role: 'user',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            }))
        },
        LanguageModelTextPart: MockLanguageModelTextPart,
        LanguageModelToolCallPart: MockLanguageModelToolCallPart,
        lm: {
            // Individual tests stub this per-case with mockResolvedValueOnce.
            selectChatModels: jest.fn()
        }
    };
});
// Shared fake vscode.LanguageModelChat. Tests clone it ({ ...mockLanguageModelChat })
// before handing it to selectChatModels, and stub sendRequest / countTokens per case.
const mockLanguageModelChat = {
    id: 'test-model',
    name: 'Test Model',
    vendor: 'test-vendor',
    family: 'test-family',
    version: '1.0',
    maxInputTokens: 4096, // surfaced by getModel() as info.contextWindow (see getModel tests)
    sendRequest: jest.fn(),
    countTokens: jest.fn()
};
describe('VsCodeLmHandler', () => {
    let handler: VsCodeLmHandler;
    // Selector matching the mocked model's vendor/family above.
    const defaultOptions: ApiHandlerOptions = {
        vsCodeLmModelSelector: {
            vendor: 'test-vendor',
            family: 'test-family'
        }
    };

    beforeEach(() => {
        jest.clearAllMocks();
        handler = new VsCodeLmHandler(defaultOptions);
    });

    afterEach(() => {
        handler.dispose();
    });

    describe('constructor', () => {
        it('should initialize with provided options', () => {
            expect(handler).toBeDefined();
            // The handler registers a config listener on construction.
            expect(vscode.workspace.onDidChangeConfiguration).toHaveBeenCalled();
        });

        it('should handle configuration changes', () => {
            // Grab the listener the constructor registered and fire it manually.
            const callback = (vscode.workspace.onDidChangeConfiguration as jest.Mock).mock.calls[0][0];
            callback({ affectsConfiguration: () => true });
            // Should reset client when config changes
            expect(handler['client']).toBeNull();
        });
    });

    describe('createClient', () => {
        it('should create client with selector', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            // createClient is private; accessed via index notation for testing.
            const client = await handler['createClient']({
                vendor: 'test-vendor',
                family: 'test-family'
            });

            expect(client).toBeDefined();
            expect(client.id).toBe('test-model');
            expect(vscode.lm.selectChatModels).toHaveBeenCalledWith({
                vendor: 'test-vendor',
                family: 'test-family'
            });
        });

        it('should return default client when no models available', async () => {
            // createClient falls back to a stub client instead of throwing.
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([]);

            const client = await handler['createClient']({});

            expect(client).toBeDefined();
            expect(client.id).toBe('default-lm');
            expect(client.vendor).toBe('vscode');
        });
    });

    describe('createMessage', () => {
        beforeEach(() => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
            mockLanguageModelChat.countTokens.mockResolvedValue(10);
        });

        it('should stream text responses', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Hello'
            }];

            const responseText = 'Hello! How can I help you?';
            // sendRequest returns both a part stream and a plain-text stream,
            // mirroring the shape of vscode.LanguageModelChatResponse.
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelTextPart(responseText);
                    return;
                })(),
                text: (async function* () {
                    yield responseText;
                    return;
                })()
            });

            const stream = handler.createMessage(systemPrompt, messages);
            const chunks = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks).toHaveLength(2); // Text chunk + usage chunk
            expect(chunks[0]).toEqual({
                type: 'text',
                text: responseText
            });
            expect(chunks[1]).toMatchObject({
                type: 'usage',
                inputTokens: expect.any(Number),
                outputTokens: expect.any(Number)
            });
        });

        it('should handle tool calls', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Calculate 2+2'
            }];

            const toolCallData = {
                name: 'calculator',
                arguments: { operation: 'add', numbers: [2, 2] },
                callId: 'call-1'
            };
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelToolCallPart(
                        toolCallData.callId,
                        toolCallData.name,
                        toolCallData.arguments
                    );
                    return;
                })(),
                text: (async function* () {
                    yield JSON.stringify({ type: 'tool_call', ...toolCallData });
                    return;
                })()
            });

            const stream = handler.createMessage(systemPrompt, messages);
            const chunks = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks).toHaveLength(2); // Tool call chunk + usage chunk
            // Tool calls are surfaced as JSON-serialized text chunks.
            expect(chunks[0]).toEqual({
                type: 'text',
                text: JSON.stringify({ type: 'tool_call', ...toolCallData })
            });
        });

        it('should handle errors', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Hello'
            }];

            mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('API Error'));

            await expect(async () => {
                const stream = handler.createMessage(systemPrompt, messages);
                for await (const _ of stream) {
                    // consume stream
                }
            }).rejects.toThrow('API Error');
        });
    });

    describe('getModel', () => {
        it('should return model info when client exists', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            // Initialize client
            await handler['getClient']();

            const model = handler.getModel();
            expect(model.id).toBe('test-model');
            expect(model.info).toBeDefined();
            expect(model.info.contextWindow).toBe(4096);
        });

        it('should return fallback model info when no client exists', () => {
            // Without a client, the id is derived from the configured selector.
            const model = handler.getModel();
            expect(model.id).toBe('test-vendor/test-family');
            expect(model.info).toBeDefined();
        });
    });

    describe('completePrompt', () => {
        it('should complete single prompt', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            const responseText = 'Completed text';
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelTextPart(responseText);
                    return;
                })(),
                text: (async function* () {
                    yield responseText;
                    return;
                })()
            });

            const result = await handler.completePrompt('Test prompt');
            expect(result).toBe(responseText);
            expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled();
        });

        it('should handle errors during completion', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('Completion failed'));

            await expect(handler.completePrompt('Test prompt'))
                .rejects
                .toThrow('VSCode LM completion error: Completion failed');
        });
    });
});

View File

@@ -23,14 +23,16 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
} }
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
switch (this.getModel().id) { const modelId = this.getModel().id
switch (modelId) {
case "o1": case "o1":
case "o1-preview": case "o1-preview":
case "o1-mini": { case "o1-mini": {
// o1 doesnt support streaming, non-1 temp, or system prompt // o1-preview and o1-mini don't support streaming, non-1 temp, or system prompt
// o1 doesnt support streaming or non-1 temp but does support a developer prompt
const response = await this.client.chat.completions.create({ const response = await this.client.chat.completions.create({
model: this.getModel().id, model: modelId,
messages: [{ role: "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)], messages: [{ role: modelId === "o1" ? "developer" : "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
}) })
yield { yield {
type: "text", type: "text",
@@ -93,7 +95,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
case "o1": case "o1":
case "o1-preview": case "o1-preview":
case "o1-mini": case "o1-mini":
// o1 doesn't support non-1 temp or system prompt // o1 doesn't support non-1 temp
requestOptions = { requestOptions = {
model: modelId, model: modelId,
messages: [{ role: "user", content: prompt }] messages: [{ role: "user", content: prompt }]

View File

@@ -0,0 +1,564 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { ApiHandler, SingleCompletionHandler } from "../";
import { calculateApiCost } from "../../utils/cost";
import { ApiStream } from "../transform/stream";
import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format";
import { SELECTOR_SEPARATOR, stringifyVsCodeLmModelSelector } from "../../shared/vsCodeSelectorUtils";
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api";
/**
* Handles interaction with VS Code's Language Model API for chat-based operations.
* This handler implements the ApiHandler interface to provide VS Code LM specific functionality.
*
* @implements {ApiHandler}
*
* @remarks
* The handler manages a VS Code language model chat client and provides methods to:
* - Create and manage chat client instances
* - Stream messages using VS Code's Language Model API
* - Retrieve model information
*
* @example
* ```typescript
* const options = {
* vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4" }
* };
* const handler = new VsCodeLmHandler(options);
*
* // Stream a conversation
* const systemPrompt = "You are a helpful assistant";
* const messages = [{ role: "user", content: "Hello!" }];
* for await (const chunk of handler.createMessage(systemPrompt, messages)) {
* console.log(chunk);
* }
* ```
*/
export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions;
private client: vscode.LanguageModelChat | null;
private disposable: vscode.Disposable | null;
private currentRequestCancellation: vscode.CancellationTokenSource | null;
/**
 * Creates the handler and registers a configuration listener that drops the
 * cached client whenever the 'lm' settings section changes, forcing a fresh
 * model selection on the next request.
 */
constructor(options: ApiHandlerOptions) {
    this.options = options;
    this.client = null;
    this.disposable = null;
    this.currentRequestCancellation = null;

    try {
        // Invalidate the cached client whenever the 'lm' configuration changes.
        this.disposable = vscode.workspace.onDidChangeConfiguration((event) => {
            if (!event.affectsConfiguration('lm')) {
                return;
            }
            try {
                this.client = null;
                this.ensureCleanState();
            } catch (error) {
                console.error('Error during configuration change cleanup:', error);
            }
        });
    } catch (error) {
        // If setup fails part-way, release anything already registered before rethrowing.
        this.dispose();
        const reason = error instanceof Error ? error.message : 'Unknown error';
        throw new Error(`Cline <Language Model API>: Failed to initialize handler: ${reason}`);
    }
}
/**
 * Creates a language model chat client based on the provided selector.
 *
 * @param selector - Selector criteria to filter language model chat instances
 * @returns Promise resolving to the first matching language model chat instance,
 *          or a minimal no-op fallback client when no models match the selector
 * @throws Error when the underlying vscode.lm.selectChatModels call itself fails
 *
 * @remarks
 * Unlike what callers might expect, an empty match list does NOT throw: a stub
 * client is returned so the extension keeps working in a degraded mode that
 * only emits a canned "functionality is limited" message.
 *
 * @example
 * const selector = { vendor: "copilot", family: "gpt-4o" };
 * const chatClient = await createClient(selector);
 */
async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
    try {
        const models = await vscode.lm.selectChatModels(selector);

        // Use first available model or create a minimal model object
        if (models && Array.isArray(models) && models.length > 0) {
            return models[0];
        }

        // No matching models: fall back to a stub client rather than throwing.
        return {
            id: 'default-lm',
            name: 'Default Language Model',
            vendor: 'vscode',
            family: 'lm',
            version: '1.0',
            maxInputTokens: 8192,
            sendRequest: async (messages, options, token) => {
                // Provide a minimal implementation
                return {
                    stream: (async function* () {
                        yield new vscode.LanguageModelTextPart(
                            "Language model functionality is limited. Please check VS Code configuration."
                        );
                    })(),
                    text: (async function* () {
                        yield "Language model functionality is limited. Please check VS Code configuration.";
                    })()
                };
            },
            countTokens: async () => 0
        };
    } catch (error) {
        const errorMessage = error instanceof Error ? error.message : 'Unknown error';
        throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
    }
}
/**
* Creates and streams a message using the VS Code Language Model API.
*
* @param systemPrompt - The system prompt to initialize the conversation context
* @param messages - An array of message parameters following the Anthropic message format
*
* @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
*
* @throws {Error} When vsCodeLmModelSelector option is not provided
* @throws {Error} When the response stream encounters an error
*
* @remarks
* This method handles the initialization of the VS Code LM client if not already created,
* converts the messages to VS Code LM format, and streams the response chunks.
* Tool calls handling is currently a work in progress.
*/
dispose(): void {
if (this.disposable) {
this.disposable.dispose();
}
if (this.currentRequestCancellation) {
this.currentRequestCancellation.cancel();
this.currentRequestCancellation.dispose();
}
}
private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
// Check for required dependencies
if (!this.client) {
console.warn('Cline <Language Model API>: No client available for token counting');
return 0;
}
if (!this.currentRequestCancellation) {
console.warn('Cline <Language Model API>: No cancellation token available for token counting');
return 0;
}
// Validate input
if (!text) {
console.debug('Cline <Language Model API>: Empty text provided for token counting');
return 0;
}
try {
// Handle different input types
let tokenCount: number;
if (typeof text === 'string') {
tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
} else if (text instanceof vscode.LanguageModelChatMessage) {
// For chat messages, ensure we have content
if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
console.debug('Cline <Language Model API>: Empty chat message content');
return 0;
}
tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
} else {
console.warn('Cline <Language Model API>: Invalid input type for token counting');
return 0;
}
// Validate the result
if (typeof tokenCount !== 'number') {
console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
return 0;
}
if (tokenCount < 0) {
console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
return 0;
}
return tokenCount;
}
catch (error) {
// Handle specific error types
if (error instanceof vscode.CancellationError) {
console.debug('Cline <Language Model API>: Token counting cancelled by user');
return 0;
}
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);
// Log additional error details if available
if (error instanceof Error && error.stack) {
console.debug('Token counting error stack:', error.stack);
}
return 0; // Fallback to prevent stream interruption
}
}
private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
const systemTokens: number = await this.countTokens(systemPrompt);
const messageTokens: number[] = await Promise.all(
vsCodeLmMessages.map(msg => this.countTokens(msg))
);
return systemTokens + messageTokens.reduce(
(sum: number, tokens: number): number => sum + tokens, 0
);
}
private ensureCleanState(): void {
if (this.currentRequestCancellation) {
this.currentRequestCancellation.cancel();
this.currentRequestCancellation.dispose();
this.currentRequestCancellation = null;
}
}
private async getClient(): Promise<vscode.LanguageModelChat> {
if (!this.client) {
console.debug('Cline <Language Model API>: Getting client with options:', {
vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
hasOptions: !!this.options,
selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
});
try {
// Use default empty selector if none provided to get all available models
const selector = this.options?.vsCodeLmModelSelector || {};
console.debug('Cline <Language Model API>: Creating client with selector:', selector);
this.client = await this.createClient(selector);
} catch (error) {
const message = error instanceof Error ? error.message : 'Unknown error';
console.error('Cline <Language Model API>: Client creation failed:', message);
throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
}
}
return this.client;
}
private cleanTerminalOutput(text: string): string {
if (!text) {
return '';
}
return text
// Нормализуем переносы строк
.replace(/\r\n/g, '\n')
.replace(/\r/g, '\n')
// Удаляем ANSI escape sequences
.replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Полный набор ANSI sequences
.replace(/\x9B[0-?]*[ -/]*[@-~]/g, '') // CSI sequences
// Удаляем последовательности установки заголовка терминала и прочие OSC sequences
.replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')
// Удаляем управляющие символы
.replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')
// Удаляем escape-последовательности VS Code
.replace(/\x1B[PD].*?\x1B\\/g, '') // DCS sequences
.replace(/\x1B_.*?\x1B\\/g, '') // APC sequences
.replace(/\x1B\^.*?\x1B\\/g, '') // PM sequences
.replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen
// Удаляем пути Windows и служебную информацию
.replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
.replace(/^;?Cwd=.*$/mg, '')
// Очищаем экранированные последовательности
.replace(/\\x[0-9a-fA-F]{2}/g, '')
.replace(/\\u[0-9a-fA-F]{4}/g, '')
// Финальная очистка
.replace(/\n{3,}/g, '\n\n') // Убираем множественные пустые строки
.trim();
}
private cleanMessageContent(content: any): any {
if (!content) {
return content;
}
if (typeof content === 'string') {
return this.cleanTerminalOutput(content);
}
if (Array.isArray(content)) {
return content.map(item => this.cleanMessageContent(item));
}
if (typeof content === 'object') {
const cleaned: any = {};
for (const [key, value] of Object.entries(content)) {
cleaned[key] = this.cleanMessageContent(value);
}
return cleaned;
}
return content;
}
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
// Ensure clean state before starting a new request
this.ensureCleanState();
const client: vscode.LanguageModelChat = await this.getClient();
// Clean system prompt and messages
const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
const cleanedMessages = messages.map(msg => ({
...msg,
content: this.cleanMessageContent(msg.content)
}));
// Convert Anthropic messages to VS Code LM messages
const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
...convertToVsCodeLmMessages(cleanedMessages),
];
// Initialize cancellation token for the request
this.currentRequestCancellation = new vscode.CancellationTokenSource();
// Calculate input tokens before starting the stream
const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);
// Accumulate the text and count at the end of the stream to reduce token counting overhead.
let accumulatedText: string = '';
try {
// Create the response stream with minimal required options
const requestOptions: vscode.LanguageModelChatRequestOptions = {
justification: `Cline would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`
};
// Note: Tool support is currently provided by the VSCode Language Model API directly
// Extensions can register tools using vscode.lm.registerTool()
const response: vscode.LanguageModelChatResponse = await client.sendRequest(
vsCodeLmMessages,
requestOptions,
this.currentRequestCancellation.token
);
// Consume the stream and handle both text and tool call chunks
for await (const chunk of response.stream) {
if (chunk instanceof vscode.LanguageModelTextPart) {
// Validate text part value
if (typeof chunk.value !== 'string') {
console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
continue;
}
accumulatedText += chunk.value;
yield {
type: "text",
text: chunk.value,
};
} else if (chunk instanceof vscode.LanguageModelToolCallPart) {
try {
// Validate tool call parameters
if (!chunk.name || typeof chunk.name !== 'string') {
console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
continue;
}
if (!chunk.callId || typeof chunk.callId !== 'string') {
console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
continue;
}
// Ensure input is a valid object
if (!chunk.input || typeof chunk.input !== 'object') {
console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
continue;
}
// Convert tool calls to text format with proper error handling
const toolCall = {
type: "tool_call",
name: chunk.name,
arguments: chunk.input,
callId: chunk.callId
};
const toolCallText = JSON.stringify(toolCall);
accumulatedText += toolCallText;
// Log tool call for debugging
console.debug('Cline <Language Model API>: Processing tool call:', {
name: chunk.name,
callId: chunk.callId,
inputSize: JSON.stringify(chunk.input).length
});
yield {
type: "text",
text: toolCallText,
};
} catch (error) {
console.error('Cline <Language Model API>: Failed to process tool call:', error);
// Continue processing other chunks even if one fails
continue;
}
} else {
console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
}
}
// Count tokens in the accumulated text after stream completion
const totalOutputTokens: number = await this.countTokens(accumulatedText);
// Report final usage after stream completion
yield {
type: "usage",
inputTokens: totalInputTokens,
outputTokens: totalOutputTokens,
totalCost: calculateApiCost(
this.getModel().info,
totalInputTokens,
totalOutputTokens
)
};
}
catch (error: unknown) {
this.ensureCleanState();
if (error instanceof vscode.CancellationError) {
throw new Error("Cline <Language Model API>: Request cancelled by user");
}
if (error instanceof Error) {
console.error('Cline <Language Model API>: Stream error details:', {
message: error.message,
stack: error.stack,
name: error.name
});
// Return original error if it's already an Error instance
throw error;
} else if (typeof error === 'object' && error !== null) {
// Handle error-like objects
const errorDetails = JSON.stringify(error, null, 2);
console.error('Cline <Language Model API>: Stream error object:', errorDetails);
throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
} else {
// Fallback for unknown error types
const errorMessage = String(error);
console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
}
}
}
// Return model information based on the current client state
getModel(): { id: string; info: ModelInfo; } {
if (this.client) {
// Validate client properties
const requiredProps = {
id: this.client.id,
vendor: this.client.vendor,
family: this.client.family,
version: this.client.version,
maxInputTokens: this.client.maxInputTokens
};
// Log any missing properties for debugging
for (const [prop, value] of Object.entries(requiredProps)) {
if (!value && value !== 0) {
console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
}
}
// Construct model ID using available information
const modelParts = [
this.client.vendor,
this.client.family,
this.client.version
].filter(Boolean);
const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);
// Build model info with conservative defaults for missing values
const modelInfo: ModelInfo = {
maxTokens: -1, // Unlimited tokens by default
contextWindow: typeof this.client.maxInputTokens === 'number'
? Math.max(0, this.client.maxInputTokens)
: openAiModelInfoSaneDefaults.contextWindow,
supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
supportsPromptCache: true,
inputPrice: 0,
outputPrice: 0,
description: `VSCode Language Model: ${modelId}`
};
return { id: modelId, info: modelInfo };
}
// Fallback when no client is available
const fallbackId = this.options.vsCodeLmModelSelector
? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
: "vscode-lm";
console.debug('Cline <Language Model API>: No client available, using fallback model info');
return {
id: fallbackId,
info: {
...openAiModelInfoSaneDefaults,
description: `VSCode Language Model (Fallback): ${fallbackId}`
}
};
}
async completePrompt(prompt: string): Promise<string> {
try {
const client = await this.getClient();
const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
let result = "";
for await (const chunk of response.stream) {
if (chunk instanceof vscode.LanguageModelTextPart) {
result += chunk.value;
}
}
return result;
} catch (error) {
if (error instanceof Error) {
throw new Error(`VSCode LM completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -0,0 +1,246 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
import { convertToVsCodeLmMessages, convertToAnthropicRole, convertToAnthropicMessage } from '../vscode-lm-format';
// Mock crypto so convertToAnthropicMessage produces a deterministic message id.
const mockCrypto = {
    randomUUID: () => 'test-uuid'
};
// 'as any' sidesteps the full Crypto interface; only randomUUID is used by the code under test.
global.crypto = mockCrypto as any;
// Define types for our mocked classes (structural stand-ins for the vscode
// LanguageModel*Part classes, used only for casting in assertions below).
interface MockLanguageModelTextPart {
    type: 'text';
    value: string;
}
interface MockLanguageModelToolCallPart {
    type: 'tool_call';
    callId: string;
    name: string;
    input: any;
}
interface MockLanguageModelToolResultPart {
    type: 'tool_result';
    toolUseId: string;
    parts: MockLanguageModelTextPart[];
}
// Union of all content-part shapes a mocked message may hold.
type MockMessageContent = MockLanguageModelTextPart | MockLanguageModelToolCallPart | MockLanguageModelToolResultPart;
// Shape of a message produced by the mocked LanguageModelChatMessage factories.
interface MockLanguageModelChatMessage {
    role: string;
    name?: string;
    content: MockMessageContent[];
}
// Mock vscode namespace: the real 'vscode' module only exists inside the
// editor host, so tests replace the parts the converter touches — the
// message-role enum, the two message factories, and the three content-part
// classes (instanceof checks in the code under test resolve against these).
jest.mock('vscode', () => {
    const LanguageModelChatMessageRole = {
        Assistant: 'assistant',
        User: 'user'
    };
    class MockLanguageModelTextPart {
        type = 'text';
        constructor(public value: string) {}
    }
    class MockLanguageModelToolCallPart {
        type = 'tool_call';
        constructor(
            public callId: string,
            public name: string,
            public input: any
        ) {}
    }
    class MockLanguageModelToolResultPart {
        type = 'tool_result';
        constructor(
            public toolUseId: string,
            public parts: MockLanguageModelTextPart[]
        ) {}
    }
    return {
        // Factories mirror the real API: a plain string is wrapped in a single text part,
        // while an array of parts is passed through unchanged.
        LanguageModelChatMessage: {
            Assistant: jest.fn((content) => ({
                role: LanguageModelChatMessageRole.Assistant,
                name: 'assistant',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            })),
            User: jest.fn((content) => ({
                role: LanguageModelChatMessageRole.User,
                name: 'user',
                content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
            }))
        },
        LanguageModelChatMessageRole,
        LanguageModelTextPart: MockLanguageModelTextPart,
        LanguageModelToolCallPart: MockLanguageModelToolCallPart,
        LanguageModelToolResultPart: MockLanguageModelToolResultPart
    };
});
// Test suite for the Anthropic <-> VS Code LM message conversion helpers.
describe('vscode-lm-format', () => {
    describe('convertToVsCodeLmMessages', () => {
        it('should convert simple string messages', () => {
            const messages: Anthropic.Messages.MessageParam[] = [
                { role: 'user', content: 'Hello' },
                { role: 'assistant', content: 'Hi there' }
            ];
            const result = convertToVsCodeLmMessages(messages);
            expect(result).toHaveLength(2);
            expect(result[0].role).toBe('user');
            expect((result[0].content[0] as MockLanguageModelTextPart).value).toBe('Hello');
            expect(result[1].role).toBe('assistant');
            expect((result[1].content[0] as MockLanguageModelTextPart).value).toBe('Hi there');
        });
        it('should handle complex user messages with tool results', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user',
                content: [
                    { type: 'text', text: 'Here is the result:' },
                    {
                        type: 'tool_result',
                        tool_use_id: 'tool-1',
                        content: 'Tool output'
                    }
                ]
            }];
            const result = convertToVsCodeLmMessages(messages);
            expect(result).toHaveLength(1);
            expect(result[0].role).toBe('user');
            expect(result[0].content).toHaveLength(2);
            // Converter emits tool results BEFORE text parts, so the destructuring
            // order here is deliberately inverted relative to the input order.
            const [toolResult, textContent] = result[0].content as [MockLanguageModelToolResultPart, MockLanguageModelTextPart];
            expect(toolResult.type).toBe('tool_result');
            expect(textContent.type).toBe('text');
        });
        it('should handle complex assistant messages with tool calls', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'assistant',
                content: [
                    { type: 'text', text: 'Let me help you with that.' },
                    {
                        type: 'tool_use',
                        id: 'tool-1',
                        name: 'calculator',
                        input: { operation: 'add', numbers: [2, 2] }
                    }
                ]
            }];
            const result = convertToVsCodeLmMessages(messages);
            expect(result).toHaveLength(1);
            expect(result[0].role).toBe('assistant');
            expect(result[0].content).toHaveLength(2);
            // Tool calls are emitted before text parts, mirroring the user-message case.
            const [toolCall, textContent] = result[0].content as [MockLanguageModelToolCallPart, MockLanguageModelTextPart];
            expect(toolCall.type).toBe('tool_call');
            expect(textContent.type).toBe('text');
        });
        it('should handle image blocks with appropriate placeholders', () => {
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user',
                content: [
                    { type: 'text', text: 'Look at this:' },
                    {
                        type: 'image',
                        source: {
                            type: 'base64',
                            media_type: 'image/png',
                            data: 'base64data'
                        }
                    }
                ]
            }];
            const result = convertToVsCodeLmMessages(messages);
            expect(result).toHaveLength(1);
            // Images cannot be sent through the VS Code LM API; the converter
            // substitutes a descriptive text placeholder instead.
            const imagePlaceholder = result[0].content[1] as MockLanguageModelTextPart;
            expect(imagePlaceholder.value).toContain('[Image (base64): image/png not supported by VSCode LM API]');
        });
    });
    describe('convertToAnthropicRole', () => {
        it('should convert assistant role correctly', () => {
            const result = convertToAnthropicRole('assistant' as any);
            expect(result).toBe('assistant');
        });
        it('should convert user role correctly', () => {
            const result = convertToAnthropicRole('user' as any);
            expect(result).toBe('user');
        });
        it('should return null for unknown roles', () => {
            const result = convertToAnthropicRole('unknown' as any);
            expect(result).toBeNull();
        });
    });
    describe('convertToAnthropicMessage', () => {
        it('should convert assistant message with text content', async () => {
            const vsCodeMessage = {
                role: 'assistant',
                name: 'assistant',
                content: [new vscode.LanguageModelTextPart('Hello')]
            };
            const result = await convertToAnthropicMessage(vsCodeMessage as any);
            expect(result.role).toBe('assistant');
            expect(result.content).toHaveLength(1);
            expect(result.content[0]).toEqual({
                type: 'text',
                text: 'Hello'
            });
            // 'test-uuid' comes from the crypto mock installed at the top of this file.
            expect(result.id).toBe('test-uuid');
        });
        it('should convert assistant message with tool calls', async () => {
            const vsCodeMessage = {
                role: 'assistant',
                name: 'assistant',
                content: [new vscode.LanguageModelToolCallPart(
                    'call-1',
                    'calculator',
                    { operation: 'add', numbers: [2, 2] }
                )]
            };
            const result = await convertToAnthropicMessage(vsCodeMessage as any);
            expect(result.content).toHaveLength(1);
            expect(result.content[0]).toEqual({
                type: 'tool_use',
                id: 'call-1',
                name: 'calculator',
                input: { operation: 'add', numbers: [2, 2] }
            });
            expect(result.id).toBe('test-uuid');
        });
        it('should throw error for non-assistant messages', async () => {
            const vsCodeMessage = {
                role: 'user',
                name: 'user',
                content: [new vscode.LanguageModelTextPart('Hello')]
            };
            await expect(convertToAnthropicMessage(vsCodeMessage as any))
                .rejects
                .toThrow('Cline <Language Model API>: Only assistant messages are supported.');
        });
    });
});

View File

@@ -0,0 +1,209 @@
import { Anthropic } from "@anthropic-ai/sdk";
import * as vscode from 'vscode';
/**
 * Safely converts a value into a plain object.
 *
 * @param value - Any value: a JSON string, an existing object, or null/undefined.
 * @returns The parsed/copied object, or {} when the value cannot be
 *          represented as an object (including JSON strings that parse
 *          to primitives, e.g. "5").
 */
function asObjectSafe(value: any): object {
    // Handle null/undefined
    if (!value) {
        return {};
    }
    try {
        // Handle strings that might be JSON
        if (typeof value === 'string') {
            const parsed: unknown = JSON.parse(value);
            // JSON.parse can yield primitives ("5" -> 5, "true" -> true);
            // only accept real objects so the declared return type is honoured.
            return (typeof parsed === 'object' && parsed !== null) ? parsed : {};
        }
        // Handle pre-existing objects (shallow copy avoids aliasing the input)
        if (typeof value === 'object') {
            return Object.assign({}, value);
        }
        return {};
    }
    catch (error) {
        console.warn('Cline <Language Model API>: Failed to parse object:', error);
        return {};
    }
}
/**
 * Converts Anthropic-format messages into VS Code LM chat messages.
 *
 * Plain-string content maps to a single-part message of the matching role.
 * For structured content, tool results/calls are emitted BEFORE text parts
 * within each message (callers and tests rely on this ordering), and image
 * blocks — unsupported by the VS Code LM API — are replaced with descriptive
 * text placeholders.
 *
 * @param anthropicMessages - Messages in the Anthropic MessageParam format
 * @returns The equivalent VS Code LM chat messages, in the original order
 */
export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
    const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];
    for (const anthropicMessage of anthropicMessages) {
        // Handle simple string messages
        if (typeof anthropicMessage.content === "string") {
            vsCodeLmMessages.push(
                anthropicMessage.role === "assistant"
                    ? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
                    : vscode.LanguageModelChatMessage.User(anthropicMessage.content)
            );
            continue;
        }
        // Handle complex message structures
        switch (anthropicMessage.role) {
            case "user": {
                // Split content into tool results vs text/image blocks in one pass.
                const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
                    nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
                    toolMessages: Anthropic.ToolResultBlockParam[];
                }>(
                    (acc, part) => {
                        if (part.type === "tool_result") {
                            acc.toolMessages.push(part);
                        }
                        else if (part.type === "text" || part.type === "image") {
                            acc.nonToolMessages.push(part);
                        }
                        return acc;
                    },
                    { nonToolMessages: [], toolMessages: [] },
                );
                // Process tool messages first then non-tool messages
                const contentParts = [
                    // Convert tool messages to ToolResultParts
                    ...toolMessages.map((toolMessage) => {
                        // Process tool result content into TextParts
                        const toolContentParts: vscode.LanguageModelTextPart[] = (
                            typeof toolMessage.content === "string"
                                ? [new vscode.LanguageModelTextPart(toolMessage.content)]
                                : (
                                    toolMessage.content?.map((part) => {
                                        if (part.type === "image") {
                                            return new vscode.LanguageModelTextPart(
                                                `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
                                            );
                                        }
                                        return new vscode.LanguageModelTextPart(part.text);
                                    })
                                    // Absent content still needs a (single empty) text part
                                    ?? [new vscode.LanguageModelTextPart("")]
                                )
                        );
                        return new vscode.LanguageModelToolResultPart(
                            toolMessage.tool_use_id,
                            toolContentParts
                        );
                    }),
                    // Convert non-tool messages to TextParts after tool messages
                    ...nonToolMessages.map((part) => {
                        if (part.type === "image") {
                            return new vscode.LanguageModelTextPart(
                                `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
                            );
                        }
                        return new vscode.LanguageModelTextPart(part.text);
                    })
                ];
                // Add single user message with all content parts
                vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
                break;
            }
            case "assistant": {
                // Split content into tool-use blocks vs text/image blocks.
                const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
                    nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
                    toolMessages: Anthropic.ToolUseBlockParam[];
                }>(
                    (acc, part) => {
                        if (part.type === "tool_use") {
                            acc.toolMessages.push(part);
                        }
                        else if (part.type === "text" || part.type === "image") {
                            acc.nonToolMessages.push(part);
                        }
                        return acc;
                    },
                    { nonToolMessages: [], toolMessages: [] },
                );
                // Process tool messages first then non-tool messages
                const contentParts = [
                    // Convert tool messages to ToolCallParts first
                    ...toolMessages.map((toolMessage) =>
                        new vscode.LanguageModelToolCallPart(
                            toolMessage.id,
                            toolMessage.name,
                            asObjectSafe(toolMessage.input)
                        )
                    ),
                    // Convert non-tool messages to TextParts after tool messages
                    ...nonToolMessages.map((part) => {
                        if (part.type === "image") {
                            return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
                        }
                        return new vscode.LanguageModelTextPart(part.text);
                    })
                ];
                // Add the assistant message to the list of messages
                vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
                break;
            }
        }
    }
    return vsCodeLmMessages;
}
/**
 * Maps a VS Code LM chat message role to the equivalent Anthropic role string.
 *
 * @param vsCodeLmMessageRole - The VS Code LM role to translate
 * @returns "assistant" or "user" for the known roles, null otherwise
 */
export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
    if (vsCodeLmMessageRole === vscode.LanguageModelChatMessageRole.Assistant) {
        return "assistant";
    }
    if (vsCodeLmMessageRole === vscode.LanguageModelChatMessageRole.User) {
        return "user";
    }
    // No Anthropic counterpart for any other role.
    return null;
}
/**
 * Converts a VS Code LM chat message into an Anthropic-format message.
 *
 * Only assistant messages are supported. Text parts become "text" blocks,
 * tool-call parts become "tool_use" blocks (with a generated id when the
 * part has no callId), and any other part kind is dropped. Token usage is
 * unknown at this layer and is reported as zero.
 *
 * @param vsCodeLmMessage - The message to convert
 * @returns The Anthropic message with a freshly generated id
 * @throws Error when the message role is not assistant
 */
export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
    const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
    if (anthropicRole !== "assistant") {
        throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
    }
    // Accumulate convertible parts explicitly; unknown part kinds are skipped.
    const contentBlocks: Anthropic.ContentBlock[] = [];
    for (const part of vsCodeLmMessage.content) {
        if (part instanceof vscode.LanguageModelTextPart) {
            contentBlocks.push({
                type: "text",
                text: part.value
            });
        }
        else if (part instanceof vscode.LanguageModelToolCallPart) {
            contentBlocks.push({
                type: "tool_use",
                id: part.callId || crypto.randomUUID(),
                name: part.name,
                input: asObjectSafe(part.input)
            });
        }
    }
    return {
        id: crypto.randomUUID(),
        type: "message",
        model: "vscode-lm",
        role: anthropicRole,
        content: contentBlocks,
        stop_reason: null,
        stop_sequence: null,
        usage: {
            input_tokens: 0,
            output_tokens: 0,
        }
    };
}

View File

@@ -93,11 +93,13 @@ type GlobalStateKey =
| "requestDelaySeconds" | "requestDelaySeconds"
| "currentApiConfigName" | "currentApiConfigName"
| "listApiConfigMeta" | "listApiConfigMeta"
| "vsCodeLmModelSelector"
| "mode" | "mode"
| "modeApiConfigs" | "modeApiConfigs"
| "customPrompts" | "customPrompts"
| "enhancementApiConfigId" | "enhancementApiConfigId"
| "experimentalDiffStrategy" | "experimentalDiffStrategy"
| "autoApprovalEnabled"
export const GlobalFileNames = { export const GlobalFileNames = {
apiConversationHistory: "api_conversation_history.json", apiConversationHistory: "api_conversation_history.json",
@@ -577,8 +579,12 @@ export class ClineProvider implements vscode.WebviewViewProvider {
const lmStudioModels = await this.getLmStudioModels(message.text) const lmStudioModels = await this.getLmStudioModels(message.text)
this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels }) this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
break break
case "requestVsCodeLmModels":
const vsCodeLmModels = await this.getVsCodeLmModels()
this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
break
case "refreshGlamaModels": case "refreshGlamaModels":
await this.refreshGlamaModels() await this.refreshGlamaModels()
break break
case "refreshOpenRouterModels": case "refreshOpenRouterModels":
await this.refreshOpenRouterModels() await this.refreshOpenRouterModels()
@@ -876,6 +882,10 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("enhancementApiConfigId", message.text) await this.updateGlobalState("enhancementApiConfigId", message.text)
await this.postStateToWebview() await this.postStateToWebview()
break break
case "autoApprovalEnabled":
await this.updateGlobalState("autoApprovalEnabled", message.bool ?? false)
await this.postStateToWebview()
break
case "enhancePrompt": case "enhancePrompt":
if (message.text) { if (message.text) {
try { try {
@@ -962,10 +972,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.configManager.SaveConfig(message.text, message.apiConfiguration); await this.configManager.SaveConfig(message.text, message.apiConfiguration);
let listApiConfig = await this.configManager.ListConfig(); let listApiConfig = await this.configManager.ListConfig();
// Update listApiConfigMeta first to ensure UI has latest data
await this.updateGlobalState("listApiConfigMeta", listApiConfig);
await Promise.all([ await Promise.all([
this.updateGlobalState("listApiConfigMeta", listApiConfig),
this.updateApiConfiguration(message.apiConfiguration), this.updateApiConfiguration(message.apiConfiguration),
this.updateGlobalState("currentApiConfigName", message.text), this.updateGlobalState("currentApiConfigName", message.text),
]) ])
@@ -1007,12 +1015,9 @@ export class ClineProvider implements vscode.WebviewViewProvider {
try { try {
const apiConfig = await this.configManager.LoadConfig(message.text); const apiConfig = await this.configManager.LoadConfig(message.text);
const listApiConfig = await this.configManager.ListConfig(); const listApiConfig = await this.configManager.ListConfig();
const config = listApiConfig?.find(c => c.name === message.text);
// Update listApiConfigMeta first to ensure UI has latest data
await this.updateGlobalState("listApiConfigMeta", listApiConfig);
await Promise.all([ await Promise.all([
this.updateGlobalState("listApiConfigMeta", listApiConfig),
this.updateGlobalState("currentApiConfigName", message.text), this.updateGlobalState("currentApiConfigName", message.text),
this.updateApiConfiguration(apiConfig), this.updateApiConfiguration(apiConfig),
]) ])
@@ -1127,6 +1132,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId, openRouterModelId,
openRouterModelInfo, openRouterModelInfo,
openRouterUseMiddleOutTransform, openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
} = apiConfiguration } = apiConfiguration
await this.updateGlobalState("apiProvider", apiProvider) await this.updateGlobalState("apiProvider", apiProvider)
await this.updateGlobalState("apiModelId", apiModelId) await this.updateGlobalState("apiModelId", apiModelId)
@@ -1158,6 +1164,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("openRouterModelId", openRouterModelId) await this.updateGlobalState("openRouterModelId", openRouterModelId)
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo) await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform) await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
if (this.cline) { if (this.cline) {
this.cline.api = buildApiHandler(apiConfiguration) this.cline.api = buildApiHandler(apiConfiguration)
} }
@@ -1228,6 +1235,17 @@ export class ClineProvider implements vscode.WebviewViewProvider {
} }
} }
// VSCode LM API
private async getVsCodeLmModels() {
try {
const models = await vscode.lm.selectChatModels({});
return models || [];
} catch (error) {
console.error('Error fetching VS Code LM models:', error);
return [];
}
}
// OpenAi // OpenAi
async getOpenAiModels(baseUrl?: string, apiKey?: string) { async getOpenAiModels(baseUrl?: string, apiKey?: string) {
@@ -1286,6 +1304,33 @@ export class ClineProvider implements vscode.WebviewViewProvider {
return cacheDir return cacheDir
} }
async handleGlamaCallback(code: string) {
let apiKey: string
try {
const response = await axios.post("https://glama.ai/api/gateway/v1/auth/exchange-code", { code })
if (response.data && response.data.apiKey) {
apiKey = response.data.apiKey
} else {
throw new Error("Invalid response from Glama API")
}
} catch (error) {
console.error("Error exchanging code for API key:", error)
throw error
}
const glama: ApiProvider = "glama"
await this.updateGlobalState("apiProvider", glama)
await this.storeSecret("glamaApiKey", apiKey)
await this.postStateToWebview()
if (this.cline) {
this.cline.api = buildApiHandler({
apiProvider: glama,
glamaApiKey: apiKey,
})
}
// await this.postMessageToWebview({ type: "action", action: "settingsButtonClicked" }) // bad ux if user is on welcome
}
async readGlamaModels(): Promise<Record<string, ModelInfo> | undefined> { async readGlamaModels(): Promise<Record<string, ModelInfo> | undefined> {
const glamaModelsFilePath = path.join( const glamaModelsFilePath = path.join(
await this.ensureCacheDirectoryExists(), await this.ensureCacheDirectoryExists(),
@@ -1612,7 +1657,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
mode, mode,
customPrompts, customPrompts,
enhancementApiConfigId, enhancementApiConfigId,
experimentalDiffStrategy, experimentalDiffStrategy,
autoApprovalEnabled,
} = await this.getState() } = await this.getState()
const allowedCommands = vscode.workspace const allowedCommands = vscode.workspace
@@ -1652,7 +1698,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
mode: mode ?? codeMode, mode: mode ?? codeMode,
customPrompts: customPrompts ?? {}, customPrompts: customPrompts ?? {},
enhancementApiConfigId, enhancementApiConfigId,
experimentalDiffStrategy: experimentalDiffStrategy ?? false, experimentalDiffStrategy: experimentalDiffStrategy ?? false,
autoApprovalEnabled: autoApprovalEnabled ?? false,
} }
} }
@@ -1762,11 +1809,13 @@ export class ClineProvider implements vscode.WebviewViewProvider {
requestDelaySeconds, requestDelaySeconds,
currentApiConfigName, currentApiConfigName,
listApiConfigMeta, listApiConfigMeta,
vsCodeLmModelSelector,
mode, mode,
modeApiConfigs, modeApiConfigs,
customPrompts, customPrompts,
enhancementApiConfigId, enhancementApiConfigId,
experimentalDiffStrategy, experimentalDiffStrategy,
autoApprovalEnabled,
] = await Promise.all([ ] = await Promise.all([
this.getGlobalState("apiProvider") as Promise<ApiProvider | undefined>, this.getGlobalState("apiProvider") as Promise<ApiProvider | undefined>,
this.getGlobalState("apiModelId") as Promise<string | undefined>, this.getGlobalState("apiModelId") as Promise<string | undefined>,
@@ -1821,11 +1870,13 @@ export class ClineProvider implements vscode.WebviewViewProvider {
this.getGlobalState("requestDelaySeconds") as Promise<number | undefined>, this.getGlobalState("requestDelaySeconds") as Promise<number | undefined>,
this.getGlobalState("currentApiConfigName") as Promise<string | undefined>, this.getGlobalState("currentApiConfigName") as Promise<string | undefined>,
this.getGlobalState("listApiConfigMeta") as Promise<ApiConfigMeta[] | undefined>, this.getGlobalState("listApiConfigMeta") as Promise<ApiConfigMeta[] | undefined>,
this.getGlobalState("vsCodeLmModelSelector") as Promise<vscode.LanguageModelChatSelector | undefined>,
this.getGlobalState("mode") as Promise<Mode | undefined>, this.getGlobalState("mode") as Promise<Mode | undefined>,
this.getGlobalState("modeApiConfigs") as Promise<Record<Mode, string> | undefined>, this.getGlobalState("modeApiConfigs") as Promise<Record<Mode, string> | undefined>,
this.getGlobalState("customPrompts") as Promise<CustomPrompts | undefined>, this.getGlobalState("customPrompts") as Promise<CustomPrompts | undefined>,
this.getGlobalState("enhancementApiConfigId") as Promise<string | undefined>, this.getGlobalState("enhancementApiConfigId") as Promise<string | undefined>,
this.getGlobalState("experimentalDiffStrategy") as Promise<boolean | undefined>, this.getGlobalState("experimentalDiffStrategy") as Promise<boolean | undefined>,
this.getGlobalState("autoApprovalEnabled") as Promise<boolean | undefined>,
]) ])
let apiProvider: ApiProvider let apiProvider: ApiProvider
@@ -1874,6 +1925,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
openRouterModelId, openRouterModelId,
openRouterModelInfo, openRouterModelInfo,
openRouterUseMiddleOutTransform, openRouterUseMiddleOutTransform,
vsCodeLmModelSelector,
}, },
lastShownAnnouncementId, lastShownAnnouncementId,
customInstructions, customInstructions,
@@ -1928,7 +1980,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
modeApiConfigs: modeApiConfigs ?? {} as Record<Mode, string>, modeApiConfigs: modeApiConfigs ?? {} as Record<Mode, string>,
customPrompts: customPrompts ?? {}, customPrompts: customPrompts ?? {},
enhancementApiConfigId, enhancementApiConfigId,
experimentalDiffStrategy: experimentalDiffStrategy ?? false, experimentalDiffStrategy: experimentalDiffStrategy ?? false,
autoApprovalEnabled: autoApprovalEnabled ?? false,
} }
} }

View File

@@ -7,7 +7,7 @@ The Cline extension exposes an API that can be used by other extensions. To use
3. Get access to the API with the following code: 3. Get access to the API with the following code:
```ts ```ts
const clineExtension = vscode.extensions.getExtension<ClineAPI>("saoudrizwan.claude-dev") const clineExtension = vscode.extensions.getExtension<ClineAPI>("rooveterinaryinc.roo-cline")
if (!clineExtension?.isActive) { if (!clineExtension?.isActive) {
throw new Error("Cline extension is not activated") throw new Error("Cline extension is not activated")
@@ -44,11 +44,11 @@ The Cline extension exposes an API that can be used by other extensions. To use
} }
``` ```
**Note:** To ensure that the `saoudrizwan.claude-dev` extension is activated before your extension, add it to the `extensionDependencies` in your `package.json`: **Note:** To ensure that the `rooveterinaryinc.roo-cline` extension is activated before your extension, add it to the `extensionDependencies` in your `package.json`:
```json ```json
"extensionDependencies": [ "extensionDependencies": [
"saoudrizwan.claude-dev" "rooveterinaryinc.roo-cline"
] ]
``` ```

View File

@@ -139,6 +139,14 @@ export function activate(context: vscode.ExtensionContext) {
return return
} }
switch (path) { switch (path) {
case "/glama": {
const code = query.get("code")
if (code) {
await visibleProvider.handleGlamaCallback(code)
}
break
}
case "/openrouter": { case "/openrouter": {
const code = query.get("code") const code = query.get("code")
if (code) { if (code) {

View File

@@ -141,5 +141,5 @@ export function mergeJson(
} }
function getExtensionUri(): vscode.Uri { function getExtensionUri(): vscode.Uri {
return vscode.extensions.getExtension("saoudrizwan.claude-dev")!.extensionUri return vscode.extensions.getExtension("rooveterinaryinc.roo-cline")!.extensionUri
} }

View File

@@ -25,8 +25,12 @@ export interface ExtensionMessage {
| "enhancedPrompt" | "enhancedPrompt"
| "commitSearchResults" | "commitSearchResults"
| "listApiConfig" | "listApiConfig"
| "vsCodeLmModels"
| "vsCodeLmApiAvailable"
| "requestVsCodeLmModels"
| "updatePrompt" | "updatePrompt"
| "systemPrompt" | "systemPrompt"
| "autoApprovalEnabled"
text?: string text?: string
action?: action?:
| "chatButtonClicked" | "chatButtonClicked"
@@ -40,6 +44,7 @@ export interface ExtensionMessage {
images?: string[] images?: string[]
ollamaModels?: string[] ollamaModels?: string[]
lmStudioModels?: string[] lmStudioModels?: string[]
vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
filePaths?: string[] filePaths?: string[]
partialMessage?: ClineMessage partialMessage?: ClineMessage
glamaModels?: Record<string, ModelInfo> glamaModels?: Record<string, ModelInfo>
@@ -89,7 +94,8 @@ export interface ExtensionState {
mode: Mode mode: Mode
modeApiConfigs?: Record<Mode, string> modeApiConfigs?: Record<Mode, string>
enhancementApiConfigId?: string enhancementApiConfigId?: string
experimentalDiffStrategy?: boolean experimentalDiffStrategy?: boolean
autoApprovalEnabled?: boolean
} }
export interface ClineMessage { export interface ClineMessage {

View File

@@ -61,17 +61,19 @@ export interface WebviewMessage {
| "terminalOutputLineLimit" | "terminalOutputLineLimit"
| "mcpEnabled" | "mcpEnabled"
| "searchCommits" | "searchCommits"
| "refreshGlamaModels"
| "alwaysApproveResubmit" | "alwaysApproveResubmit"
| "requestDelaySeconds" | "requestDelaySeconds"
| "setApiConfigPassword" | "setApiConfigPassword"
| "requestVsCodeLmModels"
| "mode" | "mode"
| "updatePrompt" | "updatePrompt"
| "updateEnhancedPrompt" | "updateEnhancedPrompt"
| "getSystemPrompt" | "getSystemPrompt"
| "systemPrompt" | "systemPrompt"
| "enhancementApiConfigId" | "enhancementApiConfigId"
| "experimentalDiffStrategy" | "experimentalDiffStrategy"
| "autoApprovalEnabled"
text?: string text?: string
disabled?: boolean disabled?: boolean
askResponse?: ClineAskResponse askResponse?: ClineAskResponse

View File

@@ -0,0 +1,56 @@
import { checkExistKey } from '../checkExistApiConfig';
import { ApiConfiguration } from '../api';
describe('checkExistKey', () => {
it('should return false for undefined config', () => {
expect(checkExistKey(undefined)).toBe(false);
});
it('should return false for empty config', () => {
const config: ApiConfiguration = {};
expect(checkExistKey(config)).toBe(false);
});
it('should return true when one key is defined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key'
};
expect(checkExistKey(config)).toBe(true);
});
it('should return true when multiple keys are defined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key',
glamaApiKey: 'glama-key',
openRouterApiKey: 'openrouter-key'
};
expect(checkExistKey(config)).toBe(true);
});
it('should return true when only non-key fields are undefined', () => {
const config: ApiConfiguration = {
apiKey: 'test-key',
apiProvider: undefined,
anthropicBaseUrl: undefined
};
expect(checkExistKey(config)).toBe(true);
});
it('should return false when all key fields are undefined', () => {
const config: ApiConfiguration = {
apiKey: undefined,
glamaApiKey: undefined,
openRouterApiKey: undefined,
awsRegion: undefined,
vertexProjectId: undefined,
openAiApiKey: undefined,
ollamaModelId: undefined,
lmStudioModelId: undefined,
geminiApiKey: undefined,
openAiNativeApiKey: undefined,
deepSeekApiKey: undefined,
vsCodeLmModelSelector: undefined
};
expect(checkExistKey(config)).toBe(false);
});
});

View File

@@ -0,0 +1,44 @@
import { stringifyVsCodeLmModelSelector, SELECTOR_SEPARATOR } from '../vsCodeSelectorUtils';
import { LanguageModelChatSelector } from 'vscode';
describe('vsCodeSelectorUtils', () => {
describe('stringifyVsCodeLmModelSelector', () => {
it('should join all defined selector properties with separator', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor',
family: 'test-family',
version: 'v1',
id: 'test-id'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor/test-family/v1/test-id');
});
it('should skip undefined properties', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor',
family: 'test-family'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor/test-family');
});
it('should handle empty selector', () => {
const selector: LanguageModelChatSelector = {};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('');
});
it('should handle selector with only one property', () => {
const selector: LanguageModelChatSelector = {
vendor: 'test-vendor'
};
const result = stringifyVsCodeLmModelSelector(selector);
expect(result).toBe('test-vendor');
});
});
});

View File

@@ -1,3 +1,5 @@
import * as vscode from 'vscode';
export type ApiProvider = export type ApiProvider =
| "anthropic" | "anthropic"
| "glama" | "glama"
@@ -10,11 +12,13 @@ export type ApiProvider =
| "gemini" | "gemini"
| "openai-native" | "openai-native"
| "deepseek" | "deepseek"
| "vscode-lm"
export interface ApiHandlerOptions { export interface ApiHandlerOptions {
apiModelId?: string apiModelId?: string
apiKey?: string // anthropic apiKey?: string // anthropic
anthropicBaseUrl?: string anthropicBaseUrl?: string
vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
glamaModelId?: string glamaModelId?: string
glamaModelInfo?: ModelInfo glamaModelInfo?: ModelInfo
glamaApiKey?: string glamaApiKey?: string
@@ -58,7 +62,7 @@ export type ApiConfiguration = ApiHandlerOptions & {
export interface ModelInfo { export interface ModelInfo {
maxTokens?: number maxTokens?: number
contextWindow?: number contextWindow: number
supportsImages?: boolean supportsImages?: boolean
supportsComputerUse?: boolean supportsComputerUse?: boolean
supportsPromptCache: boolean // this value is hardcoded for now supportsPromptCache: boolean // this value is hardcoded for now

View File

@@ -13,7 +13,8 @@ export function checkExistKey(config: ApiConfiguration | undefined) {
config.lmStudioModelId, config.lmStudioModelId,
config.geminiApiKey, config.geminiApiKey,
config.openAiNativeApiKey, config.openAiNativeApiKey,
config.deepSeekApiKey config.deepSeekApiKey,
config.vsCodeLmModelSelector,
].some((key) => key !== undefined) ].some((key) => key !== undefined)
: false; : false;
} }

View File

@@ -0,0 +1,14 @@
import { LanguageModelChatSelector } from 'vscode';
export const SELECTOR_SEPARATOR = '/';
export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
return [
selector.vendor,
selector.family,
selector.version,
selector.id
]
.filter(Boolean)
.join(SELECTOR_SEPARATOR);
}

View File

@@ -15,7 +15,7 @@ module.exports.jest = function(config) {
// Configure transform ignore patterns for ES modules // Configure transform ignore patterns for ES modules
config.transformIgnorePatterns = [ config.transformIgnorePatterns = [
'/node_modules/(?!(rehype-highlight|react-remark|unist-util-visit|unist-util-find-after|vfile|unified|bail|is-plain-obj|trough|vfile-message|unist-util-stringify-position|mdast-util-from-markdown|mdast-util-to-string|micromark|decode-named-character-reference|character-entities|markdown-table|zwitch|longest-streak|escape-string-regexp|unist-util-is|hast-util-to-text|@vscode/webview-ui-toolkit|@microsoft/fast-react-wrapper|@microsoft/fast-element|@microsoft/fast-foundation|@microsoft/fast-web-utilities|exenv-es6)/)' '/node_modules/(?!(rehype-highlight|react-remark|unist-util-visit|unist-util-find-after|vfile|unified|bail|is-plain-obj|trough|vfile-message|unist-util-stringify-position|mdast-util-from-markdown|mdast-util-to-string|micromark|decode-named-character-reference|character-entities|markdown-table|zwitch|longest-streak|escape-string-regexp|unist-util-is|hast-util-to-text|@vscode/webview-ui-toolkit|@microsoft/fast-react-wrapper|@microsoft/fast-element|@microsoft/fast-foundation|@microsoft/fast-web-utilities|exenv-es6|vscrui)/)'
]; ];
return config; return config;

View File

@@ -31,6 +31,7 @@
"shell-quote": "^1.8.2", "shell-quote": "^1.8.2",
"styled-components": "^6.1.13", "styled-components": "^6.1.13",
"typescript": "^4.9.5", "typescript": "^4.9.5",
"vscrui": "^0.2.0",
"web-vitals": "^2.1.4" "web-vitals": "^2.1.4"
}, },
"devDependencies": { "devDependencies": {
@@ -15155,6 +15156,20 @@
"url": "https://opencollective.com/unified" "url": "https://opencollective.com/unified"
} }
}, },
"node_modules/vscrui": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/vscrui/-/vscrui-0.2.0.tgz",
"integrity": "sha512-fvxZM/uIYOMN3fUbE2In+R1VrNj8PKcfAdh+Us2bJaPGuG9ySkR6xkV2aJVqXxWDX77U3v/UQGc5e7URrB52Gw==",
"license": "MIT",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/estruyf"
},
"peerDependencies": {
"@types/react": "*",
"react": "^17 || ^18"
}
},
"node_modules/w3c-hr-time": { "node_modules/w3c-hr-time": {
"version": "1.0.2", "version": "1.0.2",
"license": "MIT", "license": "MIT",

View File

@@ -26,6 +26,7 @@
"shell-quote": "^1.8.2", "shell-quote": "^1.8.2",
"styled-components": "^6.1.13", "styled-components": "^6.1.13",
"typescript": "^4.9.5", "typescript": "^4.9.5",
"vscrui": "^0.2.0",
"web-vitals": "^2.1.4" "web-vitals": "^2.1.4"
}, },
"scripts": { "scripts": {

View File

@@ -0,0 +1,235 @@
import { VSCodeCheckbox } from "@vscode/webview-ui-toolkit/react"
import { useCallback, useState } from "react"
import { useExtensionState } from "../../context/ExtensionStateContext"
import { vscode } from "../../utils/vscode"
interface AutoApproveAction {
id: string
label: string
enabled: boolean
shortName: string
description: string
}
interface AutoApproveMenuProps {
style?: React.CSSProperties
}
const AutoApproveMenu = ({ style }: AutoApproveMenuProps) => {
const [isExpanded, setIsExpanded] = useState(false)
const {
alwaysAllowReadOnly,
setAlwaysAllowReadOnly,
alwaysAllowWrite,
setAlwaysAllowWrite,
alwaysAllowExecute,
setAlwaysAllowExecute,
alwaysAllowBrowser,
setAlwaysAllowBrowser,
alwaysAllowMcp,
setAlwaysAllowMcp,
alwaysApproveResubmit,
setAlwaysApproveResubmit,
autoApprovalEnabled,
setAutoApprovalEnabled,
} = useExtensionState()
const actions: AutoApproveAction[] = [
{
id: "readFiles",
label: "Read files and directories",
shortName: "Read",
enabled: alwaysAllowReadOnly ?? false,
description: "Allows access to read any file on your computer.",
},
{
id: "editFiles",
label: "Edit files",
shortName: "Edit",
enabled: alwaysAllowWrite ?? false,
description: "Allows modification of any files on your computer.",
},
{
id: "executeCommands",
label: "Execute approved commands",
shortName: "Commands",
enabled: alwaysAllowExecute ?? false,
description:
"Allows execution of approved terminal commands. You can configure this in the settings panel.",
},
{
id: "useBrowser",
label: "Use the browser",
shortName: "Browser",
enabled: alwaysAllowBrowser ?? false,
description: "Allows ability to launch and interact with any website in a headless browser.",
},
{
id: "useMcp",
label: "Use MCP servers",
shortName: "MCP",
enabled: alwaysAllowMcp ?? false,
description: "Allows use of configured MCP servers which may modify filesystem or interact with APIs.",
},
{
id: "retryRequests",
label: "Retry failed requests",
shortName: "Retries",
enabled: alwaysApproveResubmit ?? false,
description: "Automatically retry failed API requests when the provider returns an error response.",
},
]
const toggleExpanded = useCallback(() => {
setIsExpanded((prev) => !prev)
}, [])
const enabledActionsList = actions
.filter((action) => action.enabled)
.map((action) => action.shortName)
.join(", ")
// Individual checkbox handlers - each one only updates its own state
const handleReadOnlyChange = useCallback(() => {
const newValue = !(alwaysAllowReadOnly ?? false)
setAlwaysAllowReadOnly(newValue)
vscode.postMessage({ type: "alwaysAllowReadOnly", bool: newValue })
}, [alwaysAllowReadOnly, setAlwaysAllowReadOnly])
const handleWriteChange = useCallback(() => {
const newValue = !(alwaysAllowWrite ?? false)
setAlwaysAllowWrite(newValue)
vscode.postMessage({ type: "alwaysAllowWrite", bool: newValue })
}, [alwaysAllowWrite, setAlwaysAllowWrite])
const handleExecuteChange = useCallback(() => {
const newValue = !(alwaysAllowExecute ?? false)
setAlwaysAllowExecute(newValue)
vscode.postMessage({ type: "alwaysAllowExecute", bool: newValue })
}, [alwaysAllowExecute, setAlwaysAllowExecute])
const handleBrowserChange = useCallback(() => {
const newValue = !(alwaysAllowBrowser ?? false)
setAlwaysAllowBrowser(newValue)
vscode.postMessage({ type: "alwaysAllowBrowser", bool: newValue })
}, [alwaysAllowBrowser, setAlwaysAllowBrowser])
const handleMcpChange = useCallback(() => {
const newValue = !(alwaysAllowMcp ?? false)
setAlwaysAllowMcp(newValue)
vscode.postMessage({ type: "alwaysAllowMcp", bool: newValue })
}, [alwaysAllowMcp, setAlwaysAllowMcp])
const handleRetryChange = useCallback(() => {
const newValue = !(alwaysApproveResubmit ?? false)
setAlwaysApproveResubmit(newValue)
vscode.postMessage({ type: "alwaysApproveResubmit", bool: newValue })
}, [alwaysApproveResubmit, setAlwaysApproveResubmit])
// Map action IDs to their specific handlers
const actionHandlers: Record<AutoApproveAction['id'], () => void> = {
readFiles: handleReadOnlyChange,
editFiles: handleWriteChange,
executeCommands: handleExecuteChange,
useBrowser: handleBrowserChange,
useMcp: handleMcpChange,
retryRequests: handleRetryChange,
}
return (
<div
style={{
padding: "0 15px",
userSelect: "none",
borderTop: isExpanded
? `0.5px solid color-mix(in srgb, var(--vscode-titleBar-inactiveForeground) 20%, transparent)`
: "none",
overflowY: "auto",
...style,
}}>
<div
style={{
display: "flex",
alignItems: "center",
gap: "8px",
padding: isExpanded ? "8px 0" : "8px 0 0 0",
cursor: "pointer",
}}
onClick={toggleExpanded}>
<div onClick={(e) => e.stopPropagation()}>
<VSCodeCheckbox
checked={autoApprovalEnabled ?? false}
onChange={() => {
const newValue = !(autoApprovalEnabled ?? false)
setAutoApprovalEnabled(newValue)
vscode.postMessage({ type: "autoApprovalEnabled", bool: newValue })
}}
/>
</div>
<div style={{
display: 'flex',
alignItems: 'center',
gap: '4px',
flex: 1,
minWidth: 0
}}>
<span style={{
color: "var(--vscode-foreground)",
flexShrink: 0
}}>Auto-approve:</span>
<span style={{
color: "var(--vscode-descriptionForeground)",
overflow: "hidden",
textOverflow: "ellipsis",
whiteSpace: "nowrap",
flex: 1,
minWidth: 0
}}>
{enabledActionsList || "None"}
</span>
<span
className={`codicon codicon-chevron-${isExpanded ? "down" : "right"}`}
style={{
flexShrink: 0,
marginLeft: isExpanded ? "2px" : "-2px",
}}
/>
</div>
</div>
{isExpanded && (
<div style={{ padding: "0" }}>
<div
style={{
marginBottom: "10px",
color: "var(--vscode-descriptionForeground)",
fontSize: "12px",
}}>
Auto-approve allows Cline to perform actions without asking for permission. Only enable for
actions you fully trust.
</div>
{actions.map((action) => (
<div key={action.id} style={{ margin: "6px 0" }}>
<div onClick={(e) => e.stopPropagation()}>
<VSCodeCheckbox
checked={action.enabled}
onChange={actionHandlers[action.id]}>
{action.label}
</VSCodeCheckbox>
</div>
<div
style={{
marginLeft: "28px",
color: "var(--vscode-descriptionForeground)",
fontSize: "12px",
}}>
{action.description}
</div>
</div>
))}
</div>
)}
</div>
)
}
export default AutoApproveMenu

View File

@@ -527,7 +527,6 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
flexDirection: "column", flexDirection: "column",
gap: "8px", gap: "8px",
backgroundColor: "var(--vscode-input-background)", backgroundColor: "var(--vscode-input-background)",
minHeight: "100px",
margin: "10px 15px", margin: "10px 15px",
padding: "8px" padding: "8px"
}} }}
@@ -652,7 +651,7 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
onHeightChange?.(height) onHeightChange?.(height)
}} }}
placeholder={placeholderText} placeholder={placeholderText}
minRows={4} minRows={2}
maxRows={20} maxRows={20}
autoFocus={true} autoFocus={true}
style={{ style={{

View File

@@ -25,6 +25,7 @@ import BrowserSessionRow from "./BrowserSessionRow"
import ChatRow from "./ChatRow" import ChatRow from "./ChatRow"
import ChatTextArea from "./ChatTextArea" import ChatTextArea from "./ChatTextArea"
import TaskHeader from "./TaskHeader" import TaskHeader from "./TaskHeader"
import AutoApproveMenu from "./AutoApproveMenu"
import { AudioType } from "../../../../src/shared/WebviewMessage" import { AudioType } from "../../../../src/shared/WebviewMessage"
import { validateCommand } from "../../utils/command-validation" import { validateCommand } from "../../utils/command-validation"
@@ -38,7 +39,7 @@ interface ChatViewProps {
export const MAX_IMAGES_PER_MESSAGE = 20 // Anthropic limits to 20 images export const MAX_IMAGES_PER_MESSAGE = 20 // Anthropic limits to 20 images
const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryView }: ChatViewProps) => { const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryView }: ChatViewProps) => {
const { version, clineMessages: messages, taskHistory, apiConfiguration, mcpServers, alwaysAllowBrowser, alwaysAllowReadOnly, alwaysAllowWrite, alwaysAllowExecute, alwaysAllowMcp, allowedCommands, writeDelayMs, mode, setMode } = useExtensionState() const { version, clineMessages: messages, taskHistory, apiConfiguration, mcpServers, alwaysAllowBrowser, alwaysAllowReadOnly, alwaysAllowWrite, alwaysAllowExecute, alwaysAllowMcp, allowedCommands, writeDelayMs, mode, setMode, autoApprovalEnabled } = useExtensionState()
//const task = messages.length > 0 ? (messages[0].say === "task" ? messages[0] : undefined) : undefined) : undefined //const task = messages.length > 0 ? (messages[0].say === "task" ? messages[0] : undefined) : undefined) : undefined
const task = useMemo(() => messages.at(0), [messages]) // leaving this less safe version here since if the first message is not a task, then the extension is in a bad state and needs to be debugged (see Cline.abort) const task = useMemo(() => messages.at(0), [messages]) // leaving this less safe version here since if the first message is not a task, then the extension is in a bad state and needs to be debugged (see Cline.abort)
@@ -528,7 +529,7 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie
const isAutoApproved = useCallback( const isAutoApproved = useCallback(
(message: ClineMessage | undefined) => { (message: ClineMessage | undefined) => {
if (!message || message.type !== "ask") return false if (!autoApprovalEnabled || !message || message.type !== "ask") return false
return ( return (
(alwaysAllowBrowser && message.ask === "browser_action_launch") || (alwaysAllowBrowser && message.ask === "browser_action_launch") ||
@@ -538,17 +539,7 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie
(alwaysAllowMcp && message.ask === "use_mcp_server" && isMcpToolAlwaysAllowed(message)) (alwaysAllowMcp && message.ask === "use_mcp_server" && isMcpToolAlwaysAllowed(message))
) )
}, },
[ [autoApprovalEnabled, alwaysAllowBrowser, alwaysAllowReadOnly, isReadOnlyToolAction, alwaysAllowWrite, isWriteToolAction, alwaysAllowExecute, isAllowedCommand, alwaysAllowMcp, isMcpToolAlwaysAllowed]
alwaysAllowBrowser,
alwaysAllowReadOnly,
alwaysAllowWrite,
alwaysAllowExecute,
alwaysAllowMcp,
isReadOnlyToolAction,
isWriteToolAction,
isAllowedCommand,
isMcpToolAlwaysAllowed
]
) )
useEffect(() => { useEffect(() => {
@@ -866,10 +857,12 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie
) : ( ) : (
<div <div
style={{ style={{
flexGrow: 1, flex: "1 1 0", // flex-grow: 1, flex-shrink: 1, flex-basis: 0
minHeight: 0,
overflowY: "auto", overflowY: "auto",
display: "flex", display: "flex",
flexDirection: "column", flexDirection: "column",
paddingBottom: "10px",
}}> }}>
{showAnnouncement && <Announcement version={version} hideAnnouncement={hideAnnouncement} />} {showAnnouncement && <Announcement version={version} hideAnnouncement={hideAnnouncement} />}
<div style={{ padding: "0 20px", flexShrink: 0 }}> <div style={{ padding: "0 20px", flexShrink: 0 }}>
@@ -885,6 +878,32 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie
{taskHistory.length > 0 && <HistoryPreview showHistoryView={showHistoryView} />} {taskHistory.length > 0 && <HistoryPreview showHistoryView={showHistoryView} />}
</div> </div>
)} )}
{/*
// Flex layout explanation:
// 1. Content div above uses flex: "1 1 0" to:
// - Grow to fill available space (flex-grow: 1)
// - Shrink when AutoApproveMenu needs space (flex-shrink: 1)
// - Start from zero size (flex-basis: 0) to ensure proper distribution
// minHeight: 0 allows it to shrink below its content height
//
// 2. AutoApproveMenu uses flex: "0 1 auto" to:
// - Not grow beyond its content (flex-grow: 0)
// - Shrink when viewport is small (flex-shrink: 1)
// - Use its content size as basis (flex-basis: auto)
// This ensures it takes its natural height when there's space
// but becomes scrollable when the viewport is too small
*/}
{!task && (
<AutoApproveMenu
style={{
marginBottom: -2,
flex: "0 1 auto", // flex-grow: 0, flex-shrink: 1, flex-basis: auto
minHeight: 0,
}}
/>
)}
{task && ( {task && (
<> <>
<div style={{ flexGrow: 1, display: "flex" }} ref={scrollContainerRef}> <div style={{ flexGrow: 1, display: "flex" }} ref={scrollContainerRef}>
@@ -914,6 +933,7 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie
initialTopMostItemIndex={groupedMessages.length - 1} initialTopMostItemIndex={groupedMessages.length - 1}
/> />
</div> </div>
<AutoApproveMenu />
{showScrollToBottom ? ( {showScrollToBottom ? (
<div <div
style={{ style={{
@@ -938,7 +958,7 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie
: 0.5 : 0.5
: 0, : 0,
display: "flex", display: "flex",
padding: "10px 15px 0px 15px", padding: `${primaryButtonText || secondaryButtonText || isStreaming ? "10" : "0"}px 15px 0px 15px`,
}}> }}>
{primaryButtonText && !isStreaming && ( {primaryButtonText && !isStreaming && (
<VSCodeButton <VSCodeButton

View File

@@ -0,0 +1,198 @@
import { render, fireEvent, screen } from "@testing-library/react"
import { useExtensionState } from "../../../context/ExtensionStateContext"
import AutoApproveMenu from "../AutoApproveMenu"
import { codeMode, defaultPrompts } from "../../../../../src/shared/modes"
// Mock the ExtensionStateContext hook
jest.mock("../../../context/ExtensionStateContext")
const mockUseExtensionState = useExtensionState as jest.MockedFunction<typeof useExtensionState>
describe("AutoApproveMenu", () => {
const defaultMockState = {
// Required state properties
version: "1.0.0",
clineMessages: [],
taskHistory: [],
shouldShowAnnouncement: false,
allowedCommands: [],
soundEnabled: false,
soundVolume: 0.5,
diffEnabled: false,
fuzzyMatchThreshold: 1.0,
preferredLanguage: "English",
writeDelayMs: 1000,
browserViewportSize: "900x600",
screenshotQuality: 75,
terminalOutputLineLimit: 500,
mcpEnabled: true,
requestDelaySeconds: 5,
currentApiConfigName: "default",
listApiConfigMeta: [],
mode: codeMode,
customPrompts: defaultPrompts,
enhancementApiConfigId: "",
didHydrateState: true,
showWelcome: false,
theme: {},
glamaModels: {},
openRouterModels: {},
openAiModels: [],
mcpServers: [],
filePaths: [],
// Auto-approve specific properties
alwaysAllowReadOnly: false,
alwaysAllowWrite: false,
alwaysAllowExecute: false,
alwaysAllowBrowser: false,
alwaysAllowMcp: false,
alwaysApproveResubmit: false,
autoApprovalEnabled: false,
// Required setter functions
setApiConfiguration: jest.fn(),
setCustomInstructions: jest.fn(),
setAlwaysAllowReadOnly: jest.fn(),
setAlwaysAllowWrite: jest.fn(),
setAlwaysAllowExecute: jest.fn(),
setAlwaysAllowBrowser: jest.fn(),
setAlwaysAllowMcp: jest.fn(),
setShowAnnouncement: jest.fn(),
setAllowedCommands: jest.fn(),
setSoundEnabled: jest.fn(),
setSoundVolume: jest.fn(),
setDiffEnabled: jest.fn(),
setBrowserViewportSize: jest.fn(),
setFuzzyMatchThreshold: jest.fn(),
setPreferredLanguage: jest.fn(),
setWriteDelayMs: jest.fn(),
setScreenshotQuality: jest.fn(),
setTerminalOutputLineLimit: jest.fn(),
setMcpEnabled: jest.fn(),
setAlwaysApproveResubmit: jest.fn(),
setRequestDelaySeconds: jest.fn(),
setCurrentApiConfigName: jest.fn(),
setListApiConfigMeta: jest.fn(),
onUpdateApiConfig: jest.fn(),
setMode: jest.fn(),
setCustomPrompts: jest.fn(),
setEnhancementApiConfigId: jest.fn(),
setAutoApprovalEnabled: jest.fn(),
}
// Hydrate the component with the default (everything-disabled) state
beforeEach(() => {
mockUseExtensionState.mockReturnValue(defaultMockState)
})
// Clear recorded calls on all jest mocks so tests stay independent
afterEach(() => {
jest.clearAllMocks()
})
// Collapsed by default: only the summary row ("Auto-approve: None") is shown
it("renders with initial collapsed state", () => {
render(<AutoApproveMenu />)
// Check for main checkbox and label
expect(screen.getByText("Auto-approve:")).toBeInTheDocument()
expect(screen.getByText("None")).toBeInTheDocument()
// Verify the menu is collapsed (actions not visible)
expect(screen.queryByText("Read files and directories")).not.toBeInTheDocument()
})
// Clicking the summary row reveals the individual permission rows
it("expands menu when clicked", () => {
render(<AutoApproveMenu />)
// Click to expand
fireEvent.click(screen.getByText("Auto-approve:"))
// Verify menu items are visible
expect(screen.getByText("Read files and directories")).toBeInTheDocument()
expect(screen.getByText("Edit files")).toBeInTheDocument()
expect(screen.getByText("Execute approved commands")).toBeInTheDocument()
expect(screen.getByText("Use the browser")).toBeInTheDocument()
expect(screen.getByText("Use MCP servers")).toBeInTheDocument()
expect(screen.getByText("Retry failed requests")).toBeInTheDocument()
})
// The master checkbox flips the global auto-approval flag through its setter.
// getByRole("checkbox") also asserts it is the only checkbox while collapsed
// (getByRole throws if zero or multiple matches exist).
it("toggles main auto-approval checkbox", () => {
render(<AutoApproveMenu />)
const mainCheckbox = screen.getByRole("checkbox")
fireEvent.click(mainCheckbox)
expect(defaultMockState.setAutoApprovalEnabled).toHaveBeenCalledWith(true)
})
// Each permission row invokes its dedicated setter with the new value
it("toggles individual permissions", () => {
render(<AutoApproveMenu />)
// Expand menu
fireEvent.click(screen.getByText("Auto-approve:"))
// Click read files checkbox
fireEvent.click(screen.getByText("Read files and directories"))
expect(defaultMockState.setAlwaysAllowReadOnly).toHaveBeenCalledWith(true)
// Click edit files checkbox
fireEvent.click(screen.getByText("Edit files"))
expect(defaultMockState.setAlwaysAllowWrite).toHaveBeenCalledWith(true)
// Click execute commands checkbox
fireEvent.click(screen.getByText("Execute approved commands"))
expect(defaultMockState.setAlwaysAllowExecute).toHaveBeenCalledWith(true)
})
// The collapsed summary lists short names for the enabled actions
it("displays enabled actions in summary", () => {
mockUseExtensionState.mockReturnValue({
...defaultMockState,
alwaysAllowReadOnly: true,
alwaysAllowWrite: true,
autoApprovalEnabled: true,
})
render(<AutoApproveMenu />)
// Check that enabled actions are shown in summary
expect(screen.getByText("Read, Edit")).toBeInTheDocument()
})
// Collapsing and re-expanding must neither reset the checkbox state nor
// re-fire any setter (state lives in the extension context, not the menu)
it("preserves checkbox states", () => {
// Mock state with some permissions enabled
const mockState = {
...defaultMockState,
alwaysAllowReadOnly: true,
alwaysAllowWrite: true,
}
// Update mock to return our state
mockUseExtensionState.mockReturnValue(mockState)
render(<AutoApproveMenu />)
// Expand menu
fireEvent.click(screen.getByText("Auto-approve:"))
// Verify read and edit checkboxes are checked
expect(screen.getByLabelText("Read files and directories")).toBeInTheDocument()
expect(screen.getByLabelText("Edit files")).toBeInTheDocument()
// Verify the setters haven't been called yet
expect(mockState.setAlwaysAllowReadOnly).not.toHaveBeenCalled()
expect(mockState.setAlwaysAllowWrite).not.toHaveBeenCalled()
// Collapse menu
fireEvent.click(screen.getByText("Auto-approve:"))
// Expand again
fireEvent.click(screen.getByText("Auto-approve:"))
// Verify checkboxes are still present
expect(screen.getByLabelText("Read files and directories")).toBeInTheDocument()
expect(screen.getByLabelText("Edit files")).toBeInTheDocument()
// Verify the setters still haven't been called
expect(mockState.setAlwaysAllowReadOnly).not.toHaveBeenCalled()
expect(mockState.setAlwaysAllowWrite).not.toHaveBeenCalled()
})
})

View File

@@ -0,0 +1,313 @@
import React from 'react'
import { render, waitFor } from '@testing-library/react'
import ChatView from '../ChatView'
import { ExtensionStateContextProvider } from '../../../context/ExtensionStateContext'
import { vscode } from '../../../utils/vscode'
// Mock vscode API so messages posted to the extension host can be asserted
// on without a real webview environment
jest.mock('../../../utils/vscode', () => ({
vscode: {
postMessage: jest.fn(),
},
}))
// Mock all problematic dependencies
// (ESM-only packages that jest's CommonJS transform cannot load)
jest.mock('rehype-highlight', () => ({
__esModule: true,
default: () => () => {},
}))
jest.mock('hast-util-to-text', () => ({
__esModule: true,
default: () => '',
}))
// Mock components that use ESM dependencies.
// Each stub renders its props as JSON so tests can still assert on content.
jest.mock('../BrowserSessionRow', () => ({
__esModule: true,
default: function MockBrowserSessionRow({ messages }: { messages: any[] }) {
return <div data-testid="browser-session">{JSON.stringify(messages)}</div>
}
}))
jest.mock('../ChatRow', () => ({
__esModule: true,
default: function MockChatRow({ message }: { message: any }) {
return <div data-testid="chat-row">{JSON.stringify(message)}</div>
}
}))
jest.mock('../TaskHeader', () => ({
__esModule: true,
default: function MockTaskHeader({ task }: { task: any }) {
return <div data-testid="task-header">{JSON.stringify(task)}</div>
}
}))
// The remaining UI pieces are irrelevant to these tests; stub them to render nothing
jest.mock('../AutoApproveMenu', () => ({
__esModule: true,
default: () => null,
}))
jest.mock('../../common/CodeBlock', () => ({
__esModule: true,
default: () => null,
// keep the named constant export alongside the stubbed default component
CODE_BLOCK_BG_COLOR: 'rgb(30, 30, 30)',
}))
jest.mock('../../common/CodeAccordian', () => ({
__esModule: true,
default: () => null,
}))
jest.mock('../ContextMenu', () => ({
__esModule: true,
default: () => null,
}))
/**
 * Simulates the extension host hydrating the webview by posting a synthetic
 * "state" message on `window`. Fields supplied in `state` override the
 * baseline defaults below.
 */
const mockPostMessage = (state: any) => {
  const baseline = {
    version: '1.0.0',
    clineMessages: [],
    taskHistory: [],
    shouldShowAnnouncement: false,
    allowedCommands: [],
    alwaysAllowExecute: false,
    autoApprovalEnabled: true,
  }
  window.postMessage({ type: 'state', state: { ...baseline, ...state } }, '*')
}
describe('ChatView - Auto Approval Tests', () => {
  // Reset mock call history so each test starts from a clean slate
  beforeEach(() => {
    jest.clearAllMocks()
  })

  // Read-only tool asks are answered automatically when both the master
  // auto-approval flag and alwaysAllowReadOnly are enabled.
  it('auto-approves read operations when enabled', async () => {
    render(
      <ExtensionStateContextProvider>
        <ChatView
          isHidden={false}
          showAnnouncement={false}
          hideAnnouncement={() => {}}
          showHistoryView={() => {}}
        />
      </ExtensionStateContextProvider>
    )
    // First hydrate state with initial task
    mockPostMessage({
      alwaysAllowReadOnly: true,
      autoApprovalEnabled: true,
      clineMessages: [
        {
          type: 'say',
          say: 'task',
          ts: Date.now() - 2000,
          text: 'Initial task'
        }
      ]
    })
    // Then send the read tool ask message
    mockPostMessage({
      alwaysAllowReadOnly: true,
      autoApprovalEnabled: true,
      clineMessages: [
        {
          type: 'say',
          say: 'task',
          ts: Date.now() - 2000,
          text: 'Initial task'
        },
        {
          type: 'ask',
          ask: 'tool',
          ts: Date.now(),
          text: JSON.stringify({ tool: 'readFile', path: 'test.txt' }),
          partial: false
        }
      ]
    })
    // Wait for the auto-approval message
    await waitFor(() => {
      expect(vscode.postMessage).toHaveBeenCalledWith({
        type: 'askResponse',
        askResponse: 'yesButtonClicked'
      })
    })
  })

  // With the master flag off, per-action flags (alwaysAllowReadOnly here)
  // must be ignored and no approval may be sent.
  it('does not auto-approve when autoApprovalEnabled is false', async () => {
    render(
      <ExtensionStateContextProvider>
        <ChatView
          isHidden={false}
          showAnnouncement={false}
          hideAnnouncement={() => {}}
          showHistoryView={() => {}}
        />
      </ExtensionStateContextProvider>
    )
    // First hydrate state with initial task
    mockPostMessage({
      alwaysAllowReadOnly: true,
      autoApprovalEnabled: false,
      clineMessages: [
        {
          type: 'say',
          say: 'task',
          ts: Date.now() - 2000,
          text: 'Initial task'
        }
      ]
    })
    // Then send the read tool ask message
    mockPostMessage({
      alwaysAllowReadOnly: true,
      autoApprovalEnabled: false,
      clineMessages: [
        {
          type: 'say',
          say: 'task',
          ts: Date.now() - 2000,
          text: 'Initial task'
        },
        {
          type: 'ask',
          ask: 'tool',
          ts: Date.now(),
          text: JSON.stringify({ tool: 'readFile', path: 'test.txt' }),
          partial: false
        }
      ]
    })
    // window.postMessage dispatches asynchronously, so flush the event loop
    // before the negative assertion; otherwise the expect below runs before
    // the message is even processed and would pass vacuously.
    await new Promise((resolve) => setTimeout(resolve, 0))
    // Verify no auto-approval message was sent
    expect(vscode.postMessage).not.toHaveBeenCalledWith({
      type: 'askResponse',
      askResponse: 'yesButtonClicked'
    })
  })

  // Write/edit tool asks are auto-approved when alwaysAllowWrite is enabled
  // (writeDelayMs is zeroed so the approval fires without an artificial delay).
  it('auto-approves write operations when enabled', async () => {
    render(
      <ExtensionStateContextProvider>
        <ChatView
          isHidden={false}
          showAnnouncement={false}
          hideAnnouncement={() => {}}
          showHistoryView={() => {}}
        />
      </ExtensionStateContextProvider>
    )
    // First hydrate state with initial task
    mockPostMessage({
      alwaysAllowWrite: true,
      autoApprovalEnabled: true,
      writeDelayMs: 0,
      clineMessages: [
        {
          type: 'say',
          say: 'task',
          ts: Date.now() - 2000,
          text: 'Initial task'
        }
      ]
    })
    // Then send the write tool ask message
    mockPostMessage({
      alwaysAllowWrite: true,
      autoApprovalEnabled: true,
      writeDelayMs: 0,
      clineMessages: [
        {
          type: 'say',
          say: 'task',
          ts: Date.now() - 2000,
          text: 'Initial task'
        },
        {
          type: 'ask',
          ask: 'tool',
          ts: Date.now(),
          text: JSON.stringify({ tool: 'editedExistingFile', path: 'test.txt' }),
          partial: false
        }
      ]
    })
    // Wait for the auto-approval message
    await waitFor(() => {
      expect(vscode.postMessage).toHaveBeenCalledWith({
        type: 'askResponse',
        askResponse: 'yesButtonClicked'
      })
    })
  })

  // Browser-action asks are auto-approved when alwaysAllowBrowser is enabled
  it('auto-approves browser actions when enabled', async () => {
    render(
      <ExtensionStateContextProvider>
        <ChatView
          isHidden={false}
          showAnnouncement={false}
          hideAnnouncement={() => {}}
          showHistoryView={() => {}}
        />
      </ExtensionStateContextProvider>
    )
    // First hydrate state with initial task
    mockPostMessage({
      alwaysAllowBrowser: true,
      autoApprovalEnabled: true,
      clineMessages: [
        {
          type: 'say',
          say: 'task',
          ts: Date.now() - 2000,
          text: 'Initial task'
        }
      ]
    })
    // Then send the browser action ask message
    mockPostMessage({
      alwaysAllowBrowser: true,
      autoApprovalEnabled: true,
      clineMessages: [
        {
          type: 'say',
          say: 'task',
          ts: Date.now() - 2000,
          text: 'Initial task'
        },
        {
          type: 'ask',
          ask: 'browser_action_launch',
          ts: Date.now(),
          text: JSON.stringify({ action: 'launch', url: 'http://example.com' }),
          partial: false
        }
      ]
    })
    // Wait for the auto-approval message
    await waitFor(() => {
      expect(vscode.postMessage).toHaveBeenCalledWith({
        type: 'askResponse',
        askResponse: 'yesButtonClicked'
      })
    })
  })
})

View File

@@ -46,6 +46,11 @@ jest.mock('../ChatRow', () => ({
} }
})) }))
jest.mock('../AutoApproveMenu', () => ({
__esModule: true,
default: () => null,
}))
interface ChatTextAreaProps { interface ChatTextAreaProps {
onSend: (value: string) => void; onSend: (value: string) => void;
inputValue?: string; inputValue?: string;
@@ -139,6 +144,89 @@ describe('ChatView - Auto Approval Tests', () => {
jest.clearAllMocks() jest.clearAllMocks()
}) })
it('does not auto-approve any actions when autoApprovalEnabled is false', () => {
render(
<ExtensionStateContextProvider>
<ChatView
isHidden={false}
showAnnouncement={false}
hideAnnouncement={() => {}}
showHistoryView={() => {}}
/>
</ExtensionStateContextProvider>
)
// First hydrate state with initial task
mockPostMessage({
autoApprovalEnabled: false,
alwaysAllowBrowser: true,
alwaysAllowReadOnly: true,
alwaysAllowWrite: true,
alwaysAllowExecute: true,
allowedCommands: ['npm test'],
clineMessages: [
{
type: 'say',
say: 'task',
ts: Date.now() - 2000,
text: 'Initial task'
}
]
})
// Test various types of actions that should not be auto-approved
const testCases = [
{
ask: 'browser_action_launch',
text: JSON.stringify({ action: 'launch', url: 'http://example.com' })
},
{
ask: 'tool',
text: JSON.stringify({ tool: 'readFile', path: 'test.txt' })
},
{
ask: 'tool',
text: JSON.stringify({ tool: 'editedExistingFile', path: 'test.txt' })
},
{
ask: 'command',
text: 'npm test'
}
]
testCases.forEach(testCase => {
mockPostMessage({
autoApprovalEnabled: false,
alwaysAllowBrowser: true,
alwaysAllowReadOnly: true,
alwaysAllowWrite: true,
alwaysAllowExecute: true,
allowedCommands: ['npm test'],
clineMessages: [
{
type: 'say',
say: 'task',
ts: Date.now() - 2000,
text: 'Initial task'
},
{
type: 'ask',
ask: testCase.ask,
ts: Date.now(),
text: testCase.text,
partial: false
}
]
})
// Verify no auto-approval message was sent
expect(vscode.postMessage).not.toHaveBeenCalledWith({
type: 'askResponse',
askResponse: 'yesButtonClicked'
})
})
})
it('auto-approves browser actions when alwaysAllowBrowser is enabled', async () => { it('auto-approves browser actions when alwaysAllowBrowser is enabled', async () => {
render( render(
<ExtensionStateContextProvider> <ExtensionStateContextProvider>
@@ -153,6 +241,7 @@ describe('ChatView - Auto Approval Tests', () => {
// First hydrate state with initial task // First hydrate state with initial task
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowBrowser: true, alwaysAllowBrowser: true,
clineMessages: [ clineMessages: [
{ {
@@ -166,6 +255,7 @@ describe('ChatView - Auto Approval Tests', () => {
// Then send the browser action ask message // Then send the browser action ask message
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowBrowser: true, alwaysAllowBrowser: true,
clineMessages: [ clineMessages: [
{ {
@@ -207,6 +297,7 @@ describe('ChatView - Auto Approval Tests', () => {
// First hydrate state with initial task // First hydrate state with initial task
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowReadOnly: true, alwaysAllowReadOnly: true,
clineMessages: [ clineMessages: [
{ {
@@ -220,6 +311,7 @@ describe('ChatView - Auto Approval Tests', () => {
// Then send the read-only tool ask message // Then send the read-only tool ask message
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowReadOnly: true, alwaysAllowReadOnly: true,
clineMessages: [ clineMessages: [
{ {
@@ -262,6 +354,7 @@ describe('ChatView - Auto Approval Tests', () => {
// First hydrate state with initial task // First hydrate state with initial task
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowWrite: true, alwaysAllowWrite: true,
writeDelayMs: 0, writeDelayMs: 0,
clineMessages: [ clineMessages: [
@@ -276,6 +369,7 @@ describe('ChatView - Auto Approval Tests', () => {
// Then send the write tool ask message // Then send the write tool ask message
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowWrite: true, alwaysAllowWrite: true,
writeDelayMs: 0, writeDelayMs: 0,
clineMessages: [ clineMessages: [
@@ -318,6 +412,7 @@ describe('ChatView - Auto Approval Tests', () => {
// First hydrate state with initial task // First hydrate state with initial task
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowWrite: true, alwaysAllowWrite: true,
clineMessages: [ clineMessages: [
{ {
@@ -331,6 +426,7 @@ describe('ChatView - Auto Approval Tests', () => {
// Then send a non-tool write operation message // Then send a non-tool write operation message
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowWrite: true, alwaysAllowWrite: true,
clineMessages: [ clineMessages: [
{ {
@@ -371,6 +467,7 @@ describe('ChatView - Auto Approval Tests', () => {
// First hydrate state with initial task // First hydrate state with initial task
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test'], allowedCommands: ['npm test'],
clineMessages: [ clineMessages: [
@@ -385,6 +482,7 @@ describe('ChatView - Auto Approval Tests', () => {
// Then send the command ask message // Then send the command ask message
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test'], allowedCommands: ['npm test'],
clineMessages: [ clineMessages: [
@@ -427,6 +525,7 @@ describe('ChatView - Auto Approval Tests', () => {
// First hydrate state with initial task // First hydrate state with initial task
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test'], allowedCommands: ['npm test'],
clineMessages: [ clineMessages: [
@@ -441,6 +540,7 @@ describe('ChatView - Auto Approval Tests', () => {
// Then send the disallowed command ask message // Then send the disallowed command ask message
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test'], allowedCommands: ['npm test'],
clineMessages: [ clineMessages: [
@@ -498,6 +598,7 @@ describe('ChatView - Auto Approval Tests', () => {
// First hydrate state with initial task // First hydrate state with initial task
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test', 'npm run build', 'echo', 'Select-String'], allowedCommands: ['npm test', 'npm run build', 'echo', 'Select-String'],
clineMessages: [ clineMessages: [
@@ -512,6 +613,7 @@ describe('ChatView - Auto Approval Tests', () => {
// Then send the chained command ask message // Then send the chained command ask message
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test', 'npm run build', 'echo', 'Select-String'], allowedCommands: ['npm test', 'npm run build', 'echo', 'Select-String'],
clineMessages: [ clineMessages: [
@@ -585,6 +687,7 @@ describe('ChatView - Auto Approval Tests', () => {
// Then send the chained command ask message // Then send the chained command ask message
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test', 'Select-String'], allowedCommands: ['npm test', 'Select-String'],
clineMessages: [ clineMessages: [
@@ -643,6 +746,7 @@ describe('ChatView - Auto Approval Tests', () => {
jest.clearAllMocks() jest.clearAllMocks()
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test', 'Select-String'], allowedCommands: ['npm test', 'Select-String'],
clineMessages: [ clineMessages: [
@@ -656,6 +760,7 @@ describe('ChatView - Auto Approval Tests', () => {
}) })
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test', 'Select-String'], allowedCommands: ['npm test', 'Select-String'],
clineMessages: [ clineMessages: [
@@ -688,6 +793,7 @@ describe('ChatView - Auto Approval Tests', () => {
jest.clearAllMocks() jest.clearAllMocks()
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test', 'Select-String'], allowedCommands: ['npm test', 'Select-String'],
clineMessages: [ clineMessages: [
@@ -701,6 +807,7 @@ describe('ChatView - Auto Approval Tests', () => {
}) })
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowExecute: true, alwaysAllowExecute: true,
allowedCommands: ['npm test', 'Select-String'], allowedCommands: ['npm test', 'Select-String'],
clineMessages: [ clineMessages: [
@@ -748,6 +855,7 @@ describe('ChatView - Sound Playing Tests', () => {
// First hydrate state with initial task and streaming // First hydrate state with initial task and streaming
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowBrowser: true, alwaysAllowBrowser: true,
clineMessages: [ clineMessages: [
{ {
@@ -768,6 +876,7 @@ describe('ChatView - Sound Playing Tests', () => {
// Then send the browser action ask message (streaming finished) // Then send the browser action ask message (streaming finished)
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowBrowser: true, alwaysAllowBrowser: true,
clineMessages: [ clineMessages: [
{ {
@@ -807,6 +916,7 @@ describe('ChatView - Sound Playing Tests', () => {
// First hydrate state with initial task and streaming // First hydrate state with initial task and streaming
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowBrowser: false, alwaysAllowBrowser: false,
clineMessages: [ clineMessages: [
{ {
@@ -827,6 +937,7 @@ describe('ChatView - Sound Playing Tests', () => {
// Then send the browser action ask message (streaming finished) // Then send the browser action ask message (streaming finished)
mockPostMessage({ mockPostMessage({
autoApprovalEnabled: true,
alwaysAllowBrowser: false, alwaysAllowBrowser: false,
clineMessages: [ clineMessages: [
{ {

View File

@@ -1,11 +1,10 @@
import { Checkbox, Dropdown } from "vscrui"
import type { DropdownOption } from "vscrui"
import { import {
VSCodeCheckbox,
VSCodeDropdown,
VSCodeLink, VSCodeLink,
VSCodeOption,
VSCodeRadio, VSCodeRadio,
VSCodeRadioGroup, VSCodeRadioGroup,
VSCodeTextField, VSCodeTextField
} from "@vscode/webview-ui-toolkit/react" } from "@vscode/webview-ui-toolkit/react"
import { Fragment, memo, useCallback, useEffect, useMemo, useState } from "react" import { Fragment, memo, useCallback, useEffect, useMemo, useState } from "react"
import { useEvent, useInterval } from "react-use" import { useEvent, useInterval } from "react-use"
@@ -34,6 +33,7 @@ import {
import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage" import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
import { useExtensionState } from "../../context/ExtensionStateContext" import { useExtensionState } from "../../context/ExtensionStateContext"
import { vscode } from "../../utils/vscode" import { vscode } from "../../utils/vscode"
import * as vscodemodels from "vscode"
import VSCodeButtonLink from "../common/VSCodeButtonLink" import VSCodeButtonLink from "../common/VSCodeButtonLink"
import OpenRouterModelPicker, { import OpenRouterModelPicker, {
ModelDescriptionMarkdown, ModelDescriptionMarkdown,
@@ -51,6 +51,7 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
const { apiConfiguration, setApiConfiguration, uriScheme, onUpdateApiConfig } = useExtensionState() const { apiConfiguration, setApiConfiguration, uriScheme, onUpdateApiConfig } = useExtensionState()
const [ollamaModels, setOllamaModels] = useState<string[]>([]) const [ollamaModels, setOllamaModels] = useState<string[]>([])
const [lmStudioModels, setLmStudioModels] = useState<string[]>([]) const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
const [vsCodeLmModels, setVsCodeLmModels] = useState<vscodemodels.LanguageModelChatSelector[]>([])
const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl) const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion) const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
@@ -71,54 +72,48 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl }) vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
} else if (selectedProvider === "lmstudio") { } else if (selectedProvider === "lmstudio") {
vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl }) vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
} else if (selectedProvider === "vscode-lm") {
vscode.postMessage({ type: "requestVsCodeLmModels" })
} }
}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl]) }, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
useEffect(() => { useEffect(() => {
if (selectedProvider === "ollama" || selectedProvider === "lmstudio") { if (selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm") {
requestLocalModels() requestLocalModels()
} }
}, [selectedProvider, requestLocalModels]) }, [selectedProvider, requestLocalModels])
useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null) useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm" ? 2000 : null)
const handleMessage = useCallback((event: MessageEvent) => { const handleMessage = useCallback((event: MessageEvent) => {
const message: ExtensionMessage = event.data const message: ExtensionMessage = event.data
if (message.type === "ollamaModels" && message.ollamaModels) { if (message.type === "ollamaModels" && message.ollamaModels) {
setOllamaModels(message.ollamaModels) setOllamaModels(message.ollamaModels)
} else if (message.type === "lmStudioModels" && message.lmStudioModels) { } else if (message.type === "lmStudioModels" && message.lmStudioModels) {
setLmStudioModels(message.lmStudioModels) setLmStudioModels(message.lmStudioModels)
} else if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
setVsCodeLmModels(message.vsCodeLmModels)
} }
}, []) }, [])
useEvent("message", handleMessage) useEvent("message", handleMessage)
/*
VSCodeDropdown has an open bug where dynamically rendered options don't auto select the provided value prop. You can see this for yourself by comparing it with normal select/option elements, which work as expected.
https://github.com/microsoft/vscode-webview-ui-toolkit/issues/433
In our case, when the user switches between providers, we recalculate the selectedModelId depending on the provider, the default model for that provider, and a modelId that the user may have selected. Unfortunately, the VSCodeDropdown component wouldn't select this calculated value, and would default to the first "Select a model..." option instead, which makes it seem like the model was cleared out when it wasn't.
As a workaround, we create separate instances of the dropdown for each provider, and then conditionally render the one that matches the current provider.
*/
const createDropdown = (models: Record<string, ModelInfo>) => { const createDropdown = (models: Record<string, ModelInfo>) => {
const options: DropdownOption[] = [
{ value: "", label: "Select a model..." },
...Object.keys(models).map((modelId) => ({
value: modelId,
label: modelId,
}))
]
return ( return (
<VSCodeDropdown <Dropdown
id="model-id" id="model-id"
value={selectedModelId} value={selectedModelId}
onChange={handleInputChange("apiModelId")} onChange={(value: unknown) => {handleInputChange("apiModelId")({
style={{ width: "100%" }}> target: {
<VSCodeOption value="">Select a model...</VSCodeOption> value: (value as DropdownOption).value
{Object.keys(models).map((modelId) => ( }
<VSCodeOption })}}
key={modelId} style={{ width: "100%" }}
value={modelId} options={options}
style={{ />
whiteSpace: "normal",
wordWrap: "break-word",
maxWidth: "100%",
}}>
{modelId}
</VSCodeOption>
))}
</VSCodeDropdown>
) )
} }
@@ -128,23 +123,32 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
<label htmlFor="api-provider"> <label htmlFor="api-provider">
<span style={{ fontWeight: 500 }}>API Provider</span> <span style={{ fontWeight: 500 }}>API Provider</span>
</label> </label>
<VSCodeDropdown <Dropdown
id="api-provider" id="api-provider"
value={selectedProvider} value={selectedProvider}
onChange={handleInputChange("apiProvider")} onChange={(value: unknown) => {
style={{ minWidth: 130, position: "relative", zIndex: OPENROUTER_MODEL_PICKER_Z_INDEX + 1 }}> handleInputChange("apiProvider")({
<VSCodeOption value="openrouter">OpenRouter</VSCodeOption> target: {
<VSCodeOption value="anthropic">Anthropic</VSCodeOption> value: (value as DropdownOption).value
<VSCodeOption value="gemini">Google Gemini</VSCodeOption> }
<VSCodeOption value="deepseek">DeepSeek</VSCodeOption> })
<VSCodeOption value="openai-native">OpenAI</VSCodeOption> }}
<VSCodeOption value="openai">OpenAI Compatible</VSCodeOption> style={{ minWidth: 130, position: "relative", zIndex: OPENROUTER_MODEL_PICKER_Z_INDEX + 1 }}
<VSCodeOption value="vertex">GCP Vertex AI</VSCodeOption> options={[
<VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption> { value: "openrouter", label: "OpenRouter" },
<VSCodeOption value="glama">Glama</VSCodeOption> { value: "anthropic", label: "Anthropic" },
<VSCodeOption value="lmstudio">LM Studio</VSCodeOption> { value: "gemini", label: "Google Gemini" },
<VSCodeOption value="ollama">Ollama</VSCodeOption> { value: "deepseek", label: "DeepSeek" },
</VSCodeDropdown> { value: "openai-native", label: "OpenAI" },
{ value: "openai", label: "OpenAI Compatible" },
{ value: "vertex", label: "GCP Vertex AI" },
{ value: "bedrock", label: "AWS Bedrock" },
{ value: "glama", label: "Glama" },
{ value: "vscode-lm", label: "VS Code LM API" },
{ value: "lmstudio", label: "LM Studio" },
{ value: "ollama", label: "Ollama" }
]}
/>
</div> </div>
{selectedProvider === "anthropic" && ( {selectedProvider === "anthropic" && (
@@ -158,17 +162,16 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
<span style={{ fontWeight: 500 }}>Anthropic API Key</span> <span style={{ fontWeight: 500 }}>Anthropic API Key</span>
</VSCodeTextField> </VSCodeTextField>
<VSCodeCheckbox <Checkbox
checked={anthropicBaseUrlSelected} checked={anthropicBaseUrlSelected}
onChange={(e: any) => { onChange={(checked: boolean) => {
const isChecked = e.target.checked === true setAnthropicBaseUrlSelected(checked)
setAnthropicBaseUrlSelected(isChecked) if (!checked) {
if (!isChecked) {
setApiConfiguration({ ...apiConfiguration, anthropicBaseUrl: "" }) setApiConfiguration({ ...apiConfiguration, anthropicBaseUrl: "" })
} }
}}> }}>
Use custom base URL Use custom base URL
</VSCodeCheckbox> </Checkbox>
{anthropicBaseUrlSelected && ( {anthropicBaseUrlSelected && (
<VSCodeTextField <VSCodeTextField
@@ -209,11 +212,12 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
<span style={{ fontWeight: 500 }}>Glama API Key</span> <span style={{ fontWeight: 500 }}>Glama API Key</span>
</VSCodeTextField> </VSCodeTextField>
{!apiConfiguration?.glamaApiKey && ( {!apiConfiguration?.glamaApiKey && (
<VSCodeLink <VSCodeButtonLink
href="https://glama.ai/settings/api-keys" href={getGlamaAuthUrl(uriScheme)}
style={{ display: "inline", fontSize: "inherit" }}> style={{ margin: "5px 0 0 0" }}
You can get an Glama API key by signing up here. appearance="secondary">
</VSCodeLink> Get Glama API Key
</VSCodeButtonLink>
)} )}
<p <p
style={{ style={{
@@ -286,15 +290,16 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
</span> </span>
)} */} )} */}
</p> </p>
<VSCodeCheckbox <Checkbox
checked={apiConfiguration?.openRouterUseMiddleOutTransform || false} checked={apiConfiguration?.openRouterUseMiddleOutTransform || false}
onChange={(e: any) => { onChange={(checked: boolean) => {
const isChecked = e.target.checked === true handleInputChange("openRouterUseMiddleOutTransform")({
setApiConfiguration({ ...apiConfiguration, openRouterUseMiddleOutTransform: isChecked }) target: { value: checked },
})
}}> }}>
Compress prompts and message chains to the context size (<a href="https://openrouter.ai/docs/transforms">OpenRouter Transforms</a>) Compress prompts and message chains to the context size (<a href="https://openrouter.ai/docs/transforms">OpenRouter Transforms</a>)
</VSCodeCheckbox> </Checkbox>
<br/> <br />
</div> </div>
)} )}
@@ -328,45 +333,44 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
<label htmlFor="aws-region-dropdown"> <label htmlFor="aws-region-dropdown">
<span style={{ fontWeight: 500 }}>AWS Region</span> <span style={{ fontWeight: 500 }}>AWS Region</span>
</label> </label>
<VSCodeDropdown <Dropdown
id="aws-region-dropdown" id="aws-region-dropdown"
value={apiConfiguration?.awsRegion || ""} value={apiConfiguration?.awsRegion || ""}
style={{ width: "100%" }} style={{ width: "100%" }}
onChange={handleInputChange("awsRegion")}> onChange={(value: unknown) => {handleInputChange("awsRegion")({
<VSCodeOption value="">Select a region...</VSCodeOption> target: {
{/* The user will have to choose a region that supports the model they use, but this shouldn't be a problem since they'd have to request access for it in that region in the first place. */} value: (value as DropdownOption).value
<VSCodeOption value="us-east-1">us-east-1</VSCodeOption> }
<VSCodeOption value="us-east-2">us-east-2</VSCodeOption> })}}
{/* <VSCodeOption value="us-west-1">us-west-1</VSCodeOption> */} options={[
<VSCodeOption value="us-west-2">us-west-2</VSCodeOption> { value: "", label: "Select a region..." },
{/* <VSCodeOption value="af-south-1">af-south-1</VSCodeOption> */} { value: "us-east-1", label: "us-east-1" },
{/* <VSCodeOption value="ap-east-1">ap-east-1</VSCodeOption> */} { value: "us-east-2", label: "us-east-2" },
<VSCodeOption value="ap-south-1">ap-south-1</VSCodeOption> { value: "us-west-2", label: "us-west-2" },
<VSCodeOption value="ap-northeast-1">ap-northeast-1</VSCodeOption> { value: "ap-south-1", label: "ap-south-1" },
<VSCodeOption value="ap-northeast-2">ap-northeast-2</VSCodeOption> { value: "ap-northeast-1", label: "ap-northeast-1" },
{/* <VSCodeOption value="ap-northeast-3">ap-northeast-3</VSCodeOption> */} { value: "ap-northeast-2", label: "ap-northeast-2" },
<VSCodeOption value="ap-southeast-1">ap-southeast-1</VSCodeOption> { value: "ap-southeast-1", label: "ap-southeast-1" },
<VSCodeOption value="ap-southeast-2">ap-southeast-2</VSCodeOption> { value: "ap-southeast-2", label: "ap-southeast-2" },
<VSCodeOption value="ca-central-1">ca-central-1</VSCodeOption> { value: "ca-central-1", label: "ca-central-1" },
<VSCodeOption value="eu-central-1">eu-central-1</VSCodeOption> { value: "eu-central-1", label: "eu-central-1" },
<VSCodeOption value="eu-west-1">eu-west-1</VSCodeOption> { value: "eu-west-1", label: "eu-west-1" },
<VSCodeOption value="eu-west-2">eu-west-2</VSCodeOption> { value: "eu-west-2", label: "eu-west-2" },
<VSCodeOption value="eu-west-3">eu-west-3</VSCodeOption> { value: "eu-west-3", label: "eu-west-3" },
{/* <VSCodeOption value="eu-north-1">eu-north-1</VSCodeOption> */} { value: "sa-east-1", label: "sa-east-1" },
{/* <VSCodeOption value="me-south-1">me-south-1</VSCodeOption> */} { value: "us-gov-west-1", label: "us-gov-west-1" }
<VSCodeOption value="sa-east-1">sa-east-1</VSCodeOption> ]}
<VSCodeOption value="us-gov-west-1">us-gov-west-1</VSCodeOption> />
{/* <VSCodeOption value="us-gov-east-1">us-gov-east-1</VSCodeOption> */}
</VSCodeDropdown>
</div> </div>
<VSCodeCheckbox <Checkbox
checked={apiConfiguration?.awsUseCrossRegionInference || false} checked={apiConfiguration?.awsUseCrossRegionInference || false}
onChange={(e: any) => { onChange={(checked: boolean) => {
const isChecked = e.target.checked === true handleInputChange("awsUseCrossRegionInference")({
setApiConfiguration({ ...apiConfiguration, awsUseCrossRegionInference: isChecked }) target: { value: checked },
})
}}> }}>
Use cross-region inference Use cross-region inference
</VSCodeCheckbox> </Checkbox>
<p <p
style={{ style={{
fontSize: "12px", fontSize: "12px",
@@ -393,18 +397,24 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
<label htmlFor="vertex-region-dropdown"> <label htmlFor="vertex-region-dropdown">
<span style={{ fontWeight: 500 }}>Google Cloud Region</span> <span style={{ fontWeight: 500 }}>Google Cloud Region</span>
</label> </label>
<VSCodeDropdown <Dropdown
id="vertex-region-dropdown" id="vertex-region-dropdown"
value={apiConfiguration?.vertexRegion || ""} value={apiConfiguration?.vertexRegion || ""}
style={{ width: "100%" }} style={{ width: "100%" }}
onChange={handleInputChange("vertexRegion")}> onChange={(value: unknown) => {handleInputChange("vertexRegion")({
<VSCodeOption value="">Select a region...</VSCodeOption> target: {
<VSCodeOption value="us-east5">us-east5</VSCodeOption> value: (value as DropdownOption).value
<VSCodeOption value="us-central1">us-central1</VSCodeOption> }
<VSCodeOption value="europe-west1">europe-west1</VSCodeOption> })}}
<VSCodeOption value="europe-west4">europe-west4</VSCodeOption> options={[
<VSCodeOption value="asia-southeast1">asia-southeast1</VSCodeOption> { value: "", label: "Select a region..." },
</VSCodeDropdown> { value: "us-east5", label: "us-east5" },
{ value: "us-central1", label: "us-central1" },
{ value: "europe-west1", label: "europe-west1" },
{ value: "europe-west4", label: "europe-west4" },
{ value: "asia-southeast1", label: "asia-southeast1" }
]}
/>
</div> </div>
<p <p
style={{ style={{
@@ -477,29 +487,26 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
</VSCodeTextField> </VSCodeTextField>
<OpenAiModelPicker /> <OpenAiModelPicker />
<div style={{ display: 'flex', alignItems: 'center' }}> <div style={{ display: 'flex', alignItems: 'center' }}>
<VSCodeCheckbox <Checkbox
checked={apiConfiguration?.openAiStreamingEnabled ?? true} checked={apiConfiguration?.openAiStreamingEnabled ?? true}
onChange={(e: any) => { onChange={(checked: boolean) => {
const isChecked = e.target.checked handleInputChange("openAiStreamingEnabled")({
setApiConfiguration({ target: { value: checked },
...apiConfiguration,
openAiStreamingEnabled: isChecked
}) })
}}> }}>
Enable streaming Enable streaming
</VSCodeCheckbox> </Checkbox>
</div> </div>
<VSCodeCheckbox <Checkbox
checked={azureApiVersionSelected} checked={azureApiVersionSelected}
onChange={(e: any) => { onChange={(checked: boolean) => {
const isChecked = e.target.checked === true setAzureApiVersionSelected(checked)
setAzureApiVersionSelected(isChecked) if (!checked) {
if (!isChecked) {
setApiConfiguration({ ...apiConfiguration, azureApiVersion: "" }) setApiConfiguration({ ...apiConfiguration, azureApiVersion: "" })
} }
}}> }}>
Set Azure API version Set Azure API version
</VSCodeCheckbox> </Checkbox>
{azureApiVersionSelected && ( {azureApiVersionSelected && (
<VSCodeTextField <VSCodeTextField
value={apiConfiguration?.azureApiVersion || ""} value={apiConfiguration?.azureApiVersion || ""}
@@ -619,6 +626,63 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
</div> </div>
)} )}
{selectedProvider === "vscode-lm" && (
<div>
<div className="dropdown-container">
<label htmlFor="vscode-lm-model">
<span style={{ fontWeight: 500 }}>Language Model</span>
</label>
{vsCodeLmModels.length > 0 ? (
<Dropdown
id="vscode-lm-model"
value={apiConfiguration?.vsCodeLmModelSelector ?
`${apiConfiguration.vsCodeLmModelSelector.vendor ?? ""}/${apiConfiguration.vsCodeLmModelSelector.family ?? ""}` :
""}
onChange={(value: unknown) => {
const valueStr = (value as DropdownOption)?.value;
if (!valueStr) {
return
}
const [vendor, family] = valueStr.split('/');
handleInputChange("vsCodeLmModelSelector")({
target: {
value: { vendor, family }
}
})
}}
style={{ width: "100%" }}
options={[
{ value: "", label: "Select a model..." },
...vsCodeLmModels.map((model) => ({
value: `${model.vendor}/${model.family}`,
label: `${model.vendor} - ${model.family}`
}))
]}
/>
) : (
<p style={{
fontSize: "12px",
marginTop: "5px",
color: "var(--vscode-descriptionForeground)",
}}>
The VS Code Language Model API allows you to run models provided by other VS Code extensions (including but not limited to GitHub Copilot).
The easiest way to get started is to install the Copilot and Copilot Chat extensions from the VS Code Marketplace.
</p>
)}
<p
style={{
fontSize: "12px",
marginTop: "5px",
color: "var(--vscode-errorForeground)",
fontWeight: 500,
}}>
Note: This is a very experimental integration and may not work as expected. Please report any issues to the Roo-Cline GitHub repository.
</p>
</div>
</div>
)}
{selectedProvider === "ollama" && ( {selectedProvider === "ollama" && (
<div> <div>
<VSCodeTextField <VSCodeTextField
@@ -739,8 +803,14 @@ const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) =
) )
} }
export function getGlamaAuthUrl(uriScheme?: string) {
const callbackUrl = `${uriScheme || "vscode"}://rooveterinaryinc.roo-cline/glama`
return `https://glama.ai/oauth/authorize?callback_url=${encodeURIComponent(callbackUrl)}`
}
export function getOpenRouterAuthUrl(uriScheme?: string) { export function getOpenRouterAuthUrl(uriScheme?: string) {
return `https://openrouter.ai/auth?callback_url=${uriScheme || "vscode"}://saoudrizwan.claude-dev/openrouter` return `https://openrouter.ai/auth?callback_url=${uriScheme || "vscode"}://rooveterinaryinc.roo-cline/openrouter`
} }
export const formatPrice = (price: number) => { export const formatPrice = (price: number) => {
@@ -932,6 +1002,17 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
selectedModelId: apiConfiguration?.lmStudioModelId || "", selectedModelId: apiConfiguration?.lmStudioModelId || "",
selectedModelInfo: openAiModelInfoSaneDefaults, selectedModelInfo: openAiModelInfoSaneDefaults,
} }
case "vscode-lm":
return {
selectedProvider: provider,
selectedModelId: apiConfiguration?.vsCodeLmModelSelector ?
`${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}` :
"",
selectedModelInfo: {
...openAiModelInfoSaneDefaults,
supportsImages: false, // VSCode LM API currently doesn't support images
},
}
default: default:
return getProviderData(anthropicModels, anthropicDefaultModelId) return getProviderData(anthropicModels, anthropicDefaultModelId)
} }

View File

@@ -1,4 +1,5 @@
import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
import debounce from "debounce"
import { Fzf } from "fzf" import { Fzf } from "fzf"
import React, { KeyboardEvent, memo, useEffect, useMemo, useRef, useState } from "react" import React, { KeyboardEvent, memo, useEffect, useMemo, useRef, useState } from "react"
import { useRemark } from "react-remark" import { useRemark } from "react-remark"
@@ -44,8 +45,24 @@ const GlamaModelPicker: React.FC = () => {
} }
}, [apiConfiguration, searchTerm]) }, [apiConfiguration, searchTerm])
const debouncedRefreshModels = useMemo(
() =>
debounce(
() => {
vscode.postMessage({ type: "refreshGlamaModels" })
},
50
),
[]
)
useMount(() => { useMount(() => {
vscode.postMessage({ type: "refreshGlamaModels" }) debouncedRefreshModels()
// Cleanup debounced function
return () => {
debouncedRefreshModels.clear()
}
}) })
useEffect(() => { useEffect(() => {

View File

@@ -1,6 +1,7 @@
import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
import { Fzf } from "fzf" import { Fzf } from "fzf"
import React, { KeyboardEvent, memo, useEffect, useMemo, useRef, useState } from "react" import React, { KeyboardEvent, memo, useEffect, useMemo, useRef, useState } from "react"
import debounce from "debounce"
import { useRemark } from "react-remark" import { useRemark } from "react-remark"
import styled from "styled-components" import styled from "styled-components"
import { useExtensionState } from "../../context/ExtensionStateContext" import { useExtensionState } from "../../context/ExtensionStateContext"
@@ -34,18 +35,38 @@ const OpenAiModelPicker: React.FC = () => {
} }
}, [apiConfiguration, searchTerm]) }, [apiConfiguration, searchTerm])
const debouncedRefreshModels = useMemo(
() =>
debounce(
(baseUrl: string, apiKey: string) => {
vscode.postMessage({
type: "refreshOpenAiModels",
values: {
baseUrl,
apiKey
}
})
},
50
),
[]
)
useEffect(() => { useEffect(() => {
if (!apiConfiguration?.openAiBaseUrl || !apiConfiguration?.openAiApiKey) { if (!apiConfiguration?.openAiBaseUrl || !apiConfiguration?.openAiApiKey) {
return return
} }
vscode.postMessage({ debouncedRefreshModels(
type: "refreshOpenAiModels", values: { apiConfiguration.openAiBaseUrl,
baseUrl: apiConfiguration?.openAiBaseUrl, apiConfiguration.openAiApiKey
apiKey: apiConfiguration?.openAiApiKey )
}
}) // Cleanup debounced function
}, [apiConfiguration?.openAiBaseUrl, apiConfiguration?.openAiApiKey]) return () => {
debouncedRefreshModels.clear()
}
}, [apiConfiguration?.openAiBaseUrl, apiConfiguration?.openAiApiKey, debouncedRefreshModels])
useEffect(() => { useEffect(() => {
const handleClickOutside = (event: MouseEvent) => { const handleClickOutside = (event: MouseEvent) => {

View File

@@ -1,4 +1,5 @@
import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
import debounce from "debounce"
import { Fzf } from "fzf" import { Fzf } from "fzf"
import React, { KeyboardEvent, memo, useEffect, useMemo, useRef, useState } from "react" import React, { KeyboardEvent, memo, useEffect, useMemo, useRef, useState } from "react"
import { useRemark } from "react-remark" import { useRemark } from "react-remark"
@@ -43,8 +44,24 @@ const OpenRouterModelPicker: React.FC = () => {
} }
}, [apiConfiguration, searchTerm]) }, [apiConfiguration, searchTerm])
const debouncedRefreshModels = useMemo(
() =>
debounce(
() => {
vscode.postMessage({ type: "refreshOpenRouterModels" })
},
50
),
[]
)
useMount(() => { useMount(() => {
vscode.postMessage({ type: "refreshOpenRouterModels" }) debouncedRefreshModels()
// Cleanup debounced function
return () => {
debouncedRefreshModels.clear()
}
}) })
useEffect(() => { useEffect(() => {

View File

@@ -63,8 +63,10 @@ export interface ExtensionStateContextType extends ExtensionState {
setCustomPrompts: (value: CustomPrompts) => void setCustomPrompts: (value: CustomPrompts) => void
enhancementApiConfigId?: string enhancementApiConfigId?: string
setEnhancementApiConfigId: (value: string) => void setEnhancementApiConfigId: (value: string) => void
experimentalDiffStrategy: boolean experimentalDiffStrategy: boolean
setExperimentalDiffStrategy: (value: boolean) => void setExperimentalDiffStrategy: (value: boolean) => void
autoApprovalEnabled?: boolean
setAutoApprovalEnabled: (value: boolean) => void
} }
export const ExtensionStateContext = createContext<ExtensionStateContextType | undefined>(undefined) export const ExtensionStateContext = createContext<ExtensionStateContextType | undefined>(undefined)
@@ -93,7 +95,8 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
mode: codeMode, mode: codeMode,
customPrompts: defaultPrompts, customPrompts: defaultPrompts,
enhancementApiConfigId: '', enhancementApiConfigId: '',
experimentalDiffStrategy: false, experimentalDiffStrategy: false,
autoApprovalEnabled: false,
}) })
const [didHydrateState, setDidHydrateState] = useState(false) const [didHydrateState, setDidHydrateState] = useState(false)
const [showWelcome, setShowWelcome] = useState(false) const [showWelcome, setShowWelcome] = useState(false)
@@ -124,11 +127,12 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
const message: ExtensionMessage = event.data const message: ExtensionMessage = event.data
switch (message.type) { switch (message.type) {
case "state": { case "state": {
const newState = message.state!
setState(prevState => ({ setState(prevState => ({
...prevState, ...prevState,
...message.state! ...newState
})) }))
const config = message.state?.apiConfiguration const config = newState.apiConfiguration
const hasKey = checkExistKey(config) const hasKey = checkExistKey(config)
setShowWelcome(!hasKey) setShowWelcome(!hasKey)
setDidHydrateState(true) setDidHydrateState(true)
@@ -210,6 +214,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
fuzzyMatchThreshold: state.fuzzyMatchThreshold, fuzzyMatchThreshold: state.fuzzyMatchThreshold,
writeDelayMs: state.writeDelayMs, writeDelayMs: state.writeDelayMs,
screenshotQuality: state.screenshotQuality, screenshotQuality: state.screenshotQuality,
experimentalDiffStrategy: state.experimentalDiffStrategy ?? false,
setApiConfiguration: (value) => setState((prevState) => ({ setApiConfiguration: (value) => setState((prevState) => ({
...prevState, ...prevState,
apiConfiguration: value apiConfiguration: value
@@ -240,8 +245,8 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
setMode: (value: Mode) => setState((prevState) => ({ ...prevState, mode: value })), setMode: (value: Mode) => setState((prevState) => ({ ...prevState, mode: value })),
setCustomPrompts: (value) => setState((prevState) => ({ ...prevState, customPrompts: value })), setCustomPrompts: (value) => setState((prevState) => ({ ...prevState, customPrompts: value })),
setEnhancementApiConfigId: (value) => setState((prevState) => ({ ...prevState, enhancementApiConfigId: value })), setEnhancementApiConfigId: (value) => setState((prevState) => ({ ...prevState, enhancementApiConfigId: value })),
experimentalDiffStrategy: state.experimentalDiffStrategy ?? false, setExperimentalDiffStrategy: (value) => setState((prevState) => ({ ...prevState, experimentalDiffStrategy: value })),
setExperimentalDiffStrategy: (value) => setState((prevState) => ({ ...prevState, experimentalDiffStrategy: value })) setAutoApprovalEnabled: (value) => setState((prevState) => ({ ...prevState, autoApprovalEnabled: value })),
} }
return <ExtensionStateContext.Provider value={contextValue}>{children}</ExtensionStateContext.Provider> return <ExtensionStateContext.Provider value={contextValue}>{children}</ExtensionStateContext.Provider>

View File

@@ -1,14 +1,26 @@
import '@testing-library/jest-dom'; import '@testing-library/jest-dom';
// Mock window.matchMedia // Mock crypto.getRandomValues
Object.defineProperty(window, 'crypto', {
value: {
getRandomValues: function(buffer: Uint8Array) {
for (let i = 0; i < buffer.length; i++) {
buffer[i] = Math.floor(Math.random() * 256);
}
return buffer;
}
}
});
// Mock matchMedia
Object.defineProperty(window, 'matchMedia', { Object.defineProperty(window, 'matchMedia', {
writable: true, writable: true,
value: jest.fn().mockImplementation(query => ({ value: jest.fn().mockImplementation(query => ({
matches: false, matches: false,
media: query, media: query,
onchange: null, onchange: null,
addListener: jest.fn(), // Deprecated addListener: jest.fn(), // deprecated
removeListener: jest.fn(), // Deprecated removeListener: jest.fn(), // deprecated
addEventListener: jest.fn(), addEventListener: jest.fn(),
removeEventListener: jest.fn(), removeEventListener: jest.fn(),
dispatchEvent: jest.fn(), dispatchEvent: jest.fn(),

View File

@@ -57,6 +57,11 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s
return "You must provide a valid model ID." return "You must provide a valid model ID."
} }
break break
case "vscode-lm":
if (!apiConfiguration.vsCodeLmModelSelector) {
return "You must provide a valid model selector."
}
break
} }
} }
return undefined return undefined