Mirror of https://github.com/pacnpal/Roo-Code.git (synced 2025-12-20 04:11:10 -05:00)
.changeset/fifty-lemons-double.md (new file, +5)

@@ -0,0 +1,5 @@
---
"roo-cline": minor
---

3.1
README.md (19 changed lines)

@@ -1,8 +1,23 @@
-# Roo-Cline
+# Roo Cline

 A fork of Cline, an autonomous coding agent, with some additional experimental features. It’s been mainly writing itself recently, with a light touch of human guidance here and there.

-## New in 3.0 - chat modes!
+## New in 3.1: Chat Mode Prompt Customization & Prompt Enhancements
+
+Hot off the heels of **v3.0** introducing Code, Architect, and Ask chat modes, one of the most requested features has arrived: **customizable prompts for each mode**! 🎉
+
+You can now tailor the **role definition** and **custom instructions** for every chat mode to perfectly fit your workflow. Want to adjust Architect mode to focus more on system scalability? Or tweak Ask mode for deeper research queries? Done. Plus, you can define these via **mode-specific `.clinerules-[mode]` files**. You’ll find all of this in the new **Prompts** tab in the top menu.
+
+The second big feature in this release is a complete revamp of **prompt enhancements**. This feature helps you craft messages to get even better results from Cline. Here’s what’s new:
+- Works with **any provider** and API configuration, not just OpenRouter.
+- Fully customizable prompts to match your unique needs.
+- Same simple workflow: just hit the ✨ **Enhance Prompt** button in the chat input to try it out.
+
+Whether you’re using GPT-4, other APIs, or switching configurations, this gives you total control over how your prompts are optimized.
+
+As always, we’d love to hear your thoughts and ideas! What features do you want to see in **v3.2**? Drop by https://www.reddit.com/r/roocline and join the discussion - we're building Roo Cline together. 🚀
+
+## New in 3.0 - Chat Modes!

 You can now choose between different prompts for Roo Cline to better suit your workflow. Here’s what’s available:
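A note on the mode-specific rules files mentioned in the README above: the sketch below is only an illustration, not part of this commit, and the file name and contents are assumptions. A `.clinerules-[mode]` file appears to hold plain-text custom instructions for that mode, so a hypothetical `.clinerules-architect` in the project root might look like:

    # Hypothetical example - the contents are up to you, not prescribed by Roo Cline
    Focus designs on scalability and clear component boundaries.
    Prefer incremental migration plans over big-bang rewrites.
    List trade-offs before recommending an approach.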
package.json (18 changed lines)

@@ -74,6 +74,11 @@
   "title": "MCP Servers",
   "icon": "$(server)"
 },
+{
+  "command": "roo-cline.promptsButtonClicked",
+  "title": "Prompts",
+  "icon": "$(notebook)"
+},
 {
   "command": "roo-cline.historyButtonClicked",
   "title": "History",

@@ -103,24 +108,29 @@
   "when": "view == roo-cline.SidebarProvider"
 },
 {
-  "command": "roo-cline.mcpButtonClicked",
+  "command": "roo-cline.promptsButtonClicked",
   "group": "navigation@2",
   "when": "view == roo-cline.SidebarProvider"
 },
 {
-  "command": "roo-cline.historyButtonClicked",
+  "command": "roo-cline.mcpButtonClicked",
   "group": "navigation@3",
   "when": "view == roo-cline.SidebarProvider"
 },
 {
-  "command": "roo-cline.popoutButtonClicked",
+  "command": "roo-cline.historyButtonClicked",
   "group": "navigation@4",
   "when": "view == roo-cline.SidebarProvider"
 },
 {
-  "command": "roo-cline.settingsButtonClicked",
+  "command": "roo-cline.popoutButtonClicked",
   "group": "navigation@5",
   "when": "view == roo-cline.SidebarProvider"
+},
+{
+  "command": "roo-cline.settingsButtonClicked",
+  "group": "navigation@6",
+  "when": "view == roo-cline.SidebarProvider"
 }
 ]
 },
AnthropicHandler tests:

@@ -46,7 +46,42 @@ jest.mock('@anthropic-ai/sdk', () => {
     }
   },
   messages: {
-    create: mockCreate
+    create: mockCreate.mockImplementation(async (options) => {
+      if (!options.stream) {
+        return {
+          id: 'test-completion',
+          content: [
+            { type: 'text', text: 'Test response' }
+          ],
+          role: 'assistant',
+          model: options.model,
+          usage: {
+            input_tokens: 10,
+            output_tokens: 5
+          }
+        }
+      }
+      return {
+        async *[Symbol.asyncIterator]() {
+          yield {
+            type: 'message_start',
+            message: {
+              usage: {
+                input_tokens: 10,
+                output_tokens: 5
+              }
+            }
+          }
+          yield {
+            type: 'content_block_start',
+            content_block: {
+              type: 'text',
+              text: 'Test response'
+            }
+          }
+        }
+      }
+    })
   }
 }))
 };

@@ -144,6 +179,42 @@ describe('AnthropicHandler', () => {
   });
 });

+describe('completePrompt', () => {
+  it('should complete prompt successfully', async () => {
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('Test response');
+    expect(mockCreate).toHaveBeenCalledWith({
+      model: mockOptions.apiModelId,
+      messages: [{ role: 'user', content: 'Test prompt' }],
+      max_tokens: 8192,
+      temperature: 0,
+      stream: false
+    });
+  });
+
+  it('should handle API errors', async () => {
+    mockCreate.mockRejectedValueOnce(new Error('API Error'));
+    await expect(handler.completePrompt('Test prompt'))
+      .rejects.toThrow('Anthropic completion error: API Error');
+  });
+
+  it('should handle non-text content', async () => {
+    mockCreate.mockImplementationOnce(async () => ({
+      content: [{ type: 'image' }]
+    }));
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('');
+  });
+
+  it('should handle empty response', async () => {
+    mockCreate.mockImplementationOnce(async () => ({
+      content: [{ type: 'text', text: '' }]
+    }));
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('');
+  });
+});
+
 describe('getModel', () => {
   it('should return default model if no model ID is provided', () => {
     const handlerWithoutModel = new AnthropicHandler({
AwsBedrockHandler tests:

@@ -119,6 +119,108 @@ describe('AwsBedrockHandler', () => {
   });
 });

+describe('completePrompt', () => {
+  it('should complete prompt successfully', async () => {
+    const mockResponse = {
+      output: new TextEncoder().encode(JSON.stringify({
+        content: 'Test response'
+      }))
+    };
+
+    const mockSend = jest.fn().mockResolvedValue(mockResponse);
+    handler['client'] = {
+      send: mockSend
+    } as unknown as BedrockRuntimeClient;
+
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('Test response');
+    expect(mockSend).toHaveBeenCalledWith(expect.objectContaining({
+      input: expect.objectContaining({
+        modelId: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
+        messages: expect.arrayContaining([
+          expect.objectContaining({
+            role: 'user',
+            content: [{ text: 'Test prompt' }]
+          })
+        ]),
+        inferenceConfig: expect.objectContaining({
+          maxTokens: 5000,
+          temperature: 0.3,
+          topP: 0.1
+        })
+      })
+    }));
+  });
+
+  it('should handle API errors', async () => {
+    const mockError = new Error('AWS Bedrock error');
+    const mockSend = jest.fn().mockRejectedValue(mockError);
+    handler['client'] = {
+      send: mockSend
+    } as unknown as BedrockRuntimeClient;
+
+    await expect(handler.completePrompt('Test prompt'))
+      .rejects.toThrow('Bedrock completion error: AWS Bedrock error');
+  });
+
+  it('should handle invalid response format', async () => {
+    const mockResponse = {
+      output: new TextEncoder().encode('invalid json')
+    };
+
+    const mockSend = jest.fn().mockResolvedValue(mockResponse);
+    handler['client'] = {
+      send: mockSend
+    } as unknown as BedrockRuntimeClient;
+
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('');
+  });
+
+  it('should handle empty response', async () => {
+    const mockResponse = {
+      output: new TextEncoder().encode(JSON.stringify({}))
+    };
+
+    const mockSend = jest.fn().mockResolvedValue(mockResponse);
+    handler['client'] = {
+      send: mockSend
+    } as unknown as BedrockRuntimeClient;
+
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('');
+  });
+
+  it('should handle cross-region inference', async () => {
+    handler = new AwsBedrockHandler({
+      apiModelId: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
+      awsAccessKey: 'test-access-key',
+      awsSecretKey: 'test-secret-key',
+      awsRegion: 'us-east-1',
+      awsUseCrossRegionInference: true
+    });
+
+    const mockResponse = {
+      output: new TextEncoder().encode(JSON.stringify({
+        content: 'Test response'
+      }))
+    };
+
+    const mockSend = jest.fn().mockResolvedValue(mockResponse);
+    handler['client'] = {
+      send: mockSend
+    } as unknown as BedrockRuntimeClient;
+
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('Test response');
+    expect(mockSend).toHaveBeenCalledWith(expect.objectContaining({
+      input: expect.objectContaining({
+        modelId: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0'
+      })
+    }));
+  });
+});
+
 describe('getModel', () => {
   it('should return correct model info in test environment', () => {
     const modelInfo = handler.getModel();
GeminiHandler tests:

@@ -6,7 +6,12 @@ import { GoogleGenerativeAI } from '@google/generative-ai';
 jest.mock('@google/generative-ai', () => ({
   GoogleGenerativeAI: jest.fn().mockImplementation(() => ({
     getGenerativeModel: jest.fn().mockReturnValue({
-      generateContentStream: jest.fn()
+      generateContentStream: jest.fn(),
+      generateContent: jest.fn().mockResolvedValue({
+        response: {
+          text: () => 'Test response'
+        }
+      })
     })
   }))
 }));

@@ -133,6 +138,59 @@ describe('GeminiHandler', () => {
   });
 });

+describe('completePrompt', () => {
+  it('should complete prompt successfully', async () => {
+    const mockGenerateContent = jest.fn().mockResolvedValue({
+      response: {
+        text: () => 'Test response'
+      }
+    });
+    const mockGetGenerativeModel = jest.fn().mockReturnValue({
+      generateContent: mockGenerateContent
+    });
+    (handler['client'] as any).getGenerativeModel = mockGetGenerativeModel;
+
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('Test response');
+    expect(mockGetGenerativeModel).toHaveBeenCalledWith({
+      model: 'gemini-2.0-flash-thinking-exp-1219'
+    });
+    expect(mockGenerateContent).toHaveBeenCalledWith({
+      contents: [{ role: 'user', parts: [{ text: 'Test prompt' }] }],
+      generationConfig: {
+        temperature: 0
+      }
+    });
+  });
+
+  it('should handle API errors', async () => {
+    const mockError = new Error('Gemini API error');
+    const mockGenerateContent = jest.fn().mockRejectedValue(mockError);
+    const mockGetGenerativeModel = jest.fn().mockReturnValue({
+      generateContent: mockGenerateContent
+    });
+    (handler['client'] as any).getGenerativeModel = mockGetGenerativeModel;
+
+    await expect(handler.completePrompt('Test prompt'))
+      .rejects.toThrow('Gemini completion error: Gemini API error');
+  });
+
+  it('should handle empty response', async () => {
+    const mockGenerateContent = jest.fn().mockResolvedValue({
+      response: {
+        text: () => ''
+      }
+    });
+    const mockGetGenerativeModel = jest.fn().mockReturnValue({
+      generateContent: mockGenerateContent
+    });
+    (handler['client'] as any).getGenerativeModel = mockGetGenerativeModel;
+
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('');
+  });
+});
+
 describe('getModel', () => {
   it('should return correct model info', () => {
     const modelInfo = handler.getModel();
src/api/providers/__tests__/glama.test.ts (new file, +226)

@@ -0,0 +1,226 @@
import { GlamaHandler } from '../glama';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';
import axios from 'axios';

// Mock OpenAI client
const mockCreate = jest.fn();
const mockWithResponse = jest.fn();

jest.mock('openai', () => {
  return {
    __esModule: true,
    default: jest.fn().mockImplementation(() => ({
      chat: {
        completions: {
          create: (...args: any[]) => {
            const stream = {
              [Symbol.asyncIterator]: async function* () {
                yield {
                  choices: [{
                    delta: { content: 'Test response' },
                    index: 0
                  }],
                  usage: null
                };
                yield {
                  choices: [{
                    delta: {},
                    index: 0
                  }],
                  usage: {
                    prompt_tokens: 10,
                    completion_tokens: 5,
                    total_tokens: 15
                  }
                };
              }
            };

            const result = mockCreate(...args);
            if (args[0].stream) {
              mockWithResponse.mockReturnValue(Promise.resolve({
                data: stream,
                response: {
                  headers: {
                    get: (name: string) => name === 'x-completion-request-id' ? 'test-request-id' : null
                  }
                }
              }));
              result.withResponse = mockWithResponse;
            }
            return result;
          }
        }
      }
    }))
  };
});

describe('GlamaHandler', () => {
  let handler: GlamaHandler;
  let mockOptions: ApiHandlerOptions;

  beforeEach(() => {
    mockOptions = {
      apiModelId: 'anthropic/claude-3-5-sonnet',
      glamaModelId: 'anthropic/claude-3-5-sonnet',
      glamaApiKey: 'test-api-key'
    };
    handler = new GlamaHandler(mockOptions);
    mockCreate.mockClear();
    mockWithResponse.mockClear();

    // Default mock implementation for non-streaming responses
    mockCreate.mockResolvedValue({
      id: 'test-completion',
      choices: [{
        message: { role: 'assistant', content: 'Test response' },
        finish_reason: 'stop',
        index: 0
      }],
      usage: {
        prompt_tokens: 10,
        completion_tokens: 5,
        total_tokens: 15
      }
    });
  });

  describe('constructor', () => {
    it('should initialize with provided options', () => {
      expect(handler).toBeInstanceOf(GlamaHandler);
      expect(handler.getModel().id).toBe(mockOptions.apiModelId);
    });
  });

  describe('createMessage', () => {
    const systemPrompt = 'You are a helpful assistant.';
    const messages: Anthropic.Messages.MessageParam[] = [
      {
        role: 'user',
        content: 'Hello!'
      }
    ];

    it('should handle streaming responses', async () => {
      // Mock axios for token usage request
      const mockAxios = jest.spyOn(axios, 'get').mockResolvedValueOnce({
        data: {
          tokenUsage: {
            promptTokens: 10,
            completionTokens: 5,
            cacheCreationInputTokens: 0,
            cacheReadInputTokens: 0
          },
          totalCostUsd: "0.00"
        }
      });

      const stream = handler.createMessage(systemPrompt, messages);
      const chunks: any[] = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }

      expect(chunks.length).toBe(2); // Text chunk and usage chunk
      expect(chunks[0]).toEqual({
        type: 'text',
        text: 'Test response'
      });
      expect(chunks[1]).toEqual({
        type: 'usage',
        inputTokens: 10,
        outputTokens: 5,
        cacheWriteTokens: 0,
        cacheReadTokens: 0,
        totalCost: 0
      });

      mockAxios.mockRestore();
    });

    it('should handle API errors', async () => {
      mockCreate.mockImplementationOnce(() => {
        throw new Error('API Error');
      });

      const stream = handler.createMessage(systemPrompt, messages);
      const chunks = [];

      try {
        for await (const chunk of stream) {
          chunks.push(chunk);
        }
        fail('Expected error to be thrown');
      } catch (error) {
        expect(error).toBeInstanceOf(Error);
        expect(error.message).toBe('API Error');
      }
    });
  });

  describe('completePrompt', () => {
    it('should complete prompt successfully', async () => {
      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('Test response');
      expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
        model: mockOptions.apiModelId,
        messages: [{ role: 'user', content: 'Test prompt' }],
        temperature: 0,
        max_tokens: 8192
      }));
    });

    it('should handle API errors', async () => {
      mockCreate.mockRejectedValueOnce(new Error('API Error'));
      await expect(handler.completePrompt('Test prompt'))
        .rejects.toThrow('Glama completion error: API Error');
    });

    it('should handle empty response', async () => {
      mockCreate.mockResolvedValueOnce({
        choices: [{ message: { content: '' } }]
      });
      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('');
    });

    it('should not set max_tokens for non-Anthropic models', async () => {
      // Reset mock to clear any previous calls
      mockCreate.mockClear();

      const nonAnthropicOptions = {
        apiModelId: 'openai/gpt-4',
        glamaModelId: 'openai/gpt-4',
        glamaApiKey: 'test-key',
        glamaModelInfo: {
          maxTokens: 4096,
          contextWindow: 8192,
          supportsImages: true,
          supportsPromptCache: false
        }
      };
      const nonAnthropicHandler = new GlamaHandler(nonAnthropicOptions);

      await nonAnthropicHandler.completePrompt('Test prompt');
      expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
        model: 'openai/gpt-4',
        messages: [{ role: 'user', content: 'Test prompt' }],
        temperature: 0
      }));
      expect(mockCreate.mock.calls[0][0]).not.toHaveProperty('max_tokens');
    });
  });

  describe('getModel', () => {
    it('should return model info', () => {
      const modelInfo = handler.getModel();
      expect(modelInfo.id).toBe(mockOptions.apiModelId);
      expect(modelInfo.info).toBeDefined();
      expect(modelInfo.info.maxTokens).toBe(8192);
      expect(modelInfo.info.contextWindow).toBe(200_000);
    });
  });
});
LmStudioHandler tests, rewritten in full (the previous version built its fixtures around a 'mistral-7b' model, asserted directly on the OpenAI constructor arguments, and hand-rolled a 'Hello' / ' world!' streaming mock per test); new version:

@@ -1,148 +1,160 @@
import { LmStudioHandler } from '../lmstudio';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';

// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
  return {
    __esModule: true,
    default: jest.fn().mockImplementation(() => ({
      chat: {
        completions: {
          create: mockCreate.mockImplementation(async (options) => {
            if (!options.stream) {
              return {
                id: 'test-completion',
                choices: [{
                  message: { role: 'assistant', content: 'Test response' },
                  finish_reason: 'stop',
                  index: 0
                }],
                usage: {
                  prompt_tokens: 10,
                  completion_tokens: 5,
                  total_tokens: 15
                }
              };
            }

            return {
              [Symbol.asyncIterator]: async function* () {
                yield {
                  choices: [{
                    delta: { content: 'Test response' },
                    index: 0
                  }],
                  usage: null
                };
                yield {
                  choices: [{
                    delta: {},
                    index: 0
                  }],
                  usage: {
                    prompt_tokens: 10,
                    completion_tokens: 5,
                    total_tokens: 15
                  }
                };
              }
            };
          })
        }
      }
    }))
  };
});

describe('LmStudioHandler', () => {
  let handler: LmStudioHandler;
  let mockOptions: ApiHandlerOptions;

  beforeEach(() => {
    mockOptions = {
      apiModelId: 'local-model',
      lmStudioModelId: 'local-model',
      lmStudioBaseUrl: 'http://localhost:1234/v1'
    };
    handler = new LmStudioHandler(mockOptions);
    mockCreate.mockClear();
  });

  describe('constructor', () => {
    it('should initialize with provided options', () => {
      expect(handler).toBeInstanceOf(LmStudioHandler);
      expect(handler.getModel().id).toBe(mockOptions.lmStudioModelId);
    });

    it('should use default base URL if not provided', () => {
      const handlerWithoutUrl = new LmStudioHandler({
        apiModelId: 'local-model',
        lmStudioModelId: 'local-model'
      });
      expect(handlerWithoutUrl).toBeInstanceOf(LmStudioHandler);
    });
  });

  describe('createMessage', () => {
    const systemPrompt = 'You are a helpful assistant.';
    const messages: Anthropic.Messages.MessageParam[] = [
      {
        role: 'user',
        content: 'Hello!'
      }
    ];

    it('should handle streaming responses', async () => {
      const stream = handler.createMessage(systemPrompt, messages);
      const chunks: any[] = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }

      expect(chunks.length).toBeGreaterThan(0);
      const textChunks = chunks.filter(chunk => chunk.type === 'text');
      expect(textChunks).toHaveLength(1);
      expect(textChunks[0].text).toBe('Test response');
    });

    it('should handle API errors', async () => {
      mockCreate.mockRejectedValueOnce(new Error('API Error'));

      const stream = handler.createMessage(systemPrompt, messages);

      await expect(async () => {
        for await (const chunk of stream) {
          // Should not reach here
        }
      }).rejects.toThrow('Please check the LM Studio developer logs to debug what went wrong');
    });
  });

  describe('completePrompt', () => {
    it('should complete prompt successfully', async () => {
      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('Test response');
      expect(mockCreate).toHaveBeenCalledWith({
        model: mockOptions.lmStudioModelId,
        messages: [{ role: 'user', content: 'Test prompt' }],
        temperature: 0,
        stream: false
      });
    });

    it('should handle API errors', async () => {
      mockCreate.mockRejectedValueOnce(new Error('API Error'));
      await expect(handler.completePrompt('Test prompt'))
        .rejects.toThrow('Please check the LM Studio developer logs to debug what went wrong');
    });

    it('should handle empty response', async () => {
      mockCreate.mockResolvedValueOnce({
        choices: [{ message: { content: '' } }]
      });
      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('');
    });
  });

  describe('getModel', () => {
    it('should return model info', () => {
      const modelInfo = handler.getModel();
      expect(modelInfo.id).toBe(mockOptions.lmStudioModelId);
      expect(modelInfo.info).toBeDefined();
      expect(modelInfo.info.maxTokens).toBe(-1);
      expect(modelInfo.info.contextWindow).toBe(128_000);
    });
  });
});
OllamaHandler tests, rewritten in full (the previous version asserted on the OpenAI constructor arguments, hand-rolled a 'Hello' / ' world!' streaming mock, and had a default-model-ID test that has been removed); new version:

@@ -1,148 +1,160 @@
import { OllamaHandler } from '../ollama';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';

// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
  return {
    __esModule: true,
    default: jest.fn().mockImplementation(() => ({
      chat: {
        completions: {
          create: mockCreate.mockImplementation(async (options) => {
            if (!options.stream) {
              return {
                id: 'test-completion',
                choices: [{
                  message: { role: 'assistant', content: 'Test response' },
                  finish_reason: 'stop',
                  index: 0
                }],
                usage: {
                  prompt_tokens: 10,
                  completion_tokens: 5,
                  total_tokens: 15
                }
              };
            }

            return {
              [Symbol.asyncIterator]: async function* () {
                yield {
                  choices: [{
                    delta: { content: 'Test response' },
                    index: 0
                  }],
                  usage: null
                };
                yield {
                  choices: [{
                    delta: {},
                    index: 0
                  }],
                  usage: {
                    prompt_tokens: 10,
                    completion_tokens: 5,
                    total_tokens: 15
                  }
                };
              }
            };
          })
        }
      }
    }))
  };
});

describe('OllamaHandler', () => {
  let handler: OllamaHandler;
  let mockOptions: ApiHandlerOptions;

  beforeEach(() => {
    mockOptions = {
      apiModelId: 'llama2',
      ollamaModelId: 'llama2',
      ollamaBaseUrl: 'http://localhost:11434/v1'
    };
    handler = new OllamaHandler(mockOptions);
    mockCreate.mockClear();
  });

  describe('constructor', () => {
    it('should initialize with provided options', () => {
      expect(handler).toBeInstanceOf(OllamaHandler);
      expect(handler.getModel().id).toBe(mockOptions.ollamaModelId);
    });

    it('should use default base URL if not provided', () => {
      const handlerWithoutUrl = new OllamaHandler({
        apiModelId: 'llama2',
        ollamaModelId: 'llama2'
      });
      expect(handlerWithoutUrl).toBeInstanceOf(OllamaHandler);
    });
  });

  describe('createMessage', () => {
    const systemPrompt = 'You are a helpful assistant.';
    const messages: Anthropic.Messages.MessageParam[] = [
      {
        role: 'user',
        content: 'Hello!'
      }
    ];

    it('should handle streaming responses', async () => {
      const stream = handler.createMessage(systemPrompt, messages);
      const chunks: any[] = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }

      expect(chunks.length).toBeGreaterThan(0);
      const textChunks = chunks.filter(chunk => chunk.type === 'text');
      expect(textChunks).toHaveLength(1);
      expect(textChunks[0].text).toBe('Test response');
    });

    it('should handle API errors', async () => {
      mockCreate.mockRejectedValueOnce(new Error('API Error'));

      const stream = handler.createMessage(systemPrompt, messages);

      await expect(async () => {
        for await (const chunk of stream) {
          // Should not reach here
        }
      }).rejects.toThrow('API Error');
    });
  });

  describe('completePrompt', () => {
    it('should complete prompt successfully', async () => {
      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('Test response');
      expect(mockCreate).toHaveBeenCalledWith({
        model: mockOptions.ollamaModelId,
        messages: [{ role: 'user', content: 'Test prompt' }],
        temperature: 0,
        stream: false
      });
    });

    it('should handle API errors', async () => {
      mockCreate.mockRejectedValueOnce(new Error('API Error'));
      await expect(handler.completePrompt('Test prompt'))
        .rejects.toThrow('Ollama completion error: API Error');
    });

    it('should handle empty response', async () => {
      mockCreate.mockResolvedValueOnce({
        choices: [{ message: { content: '' } }]
      });
      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('');
    });
  });

  describe('getModel', () => {
    it('should return model info', () => {
      const modelInfo = handler.getModel();
      expect(modelInfo.id).toBe(mockOptions.ollamaModelId);
      expect(modelInfo.info).toBeDefined();
      expect(modelInfo.info.maxTokens).toBe(-1);
      expect(modelInfo.info.contextWindow).toBe(128_000);
    });
  });
});
OpenAiNativeHandler tests, rewritten in full (the previous version asserted on the OpenAI constructor, walked o1 non-streaming and gpt-4o streaming responses chunk by chunk, and checked the default-model fallback via openAiNativeDefaultModelId); new version:

@@ -1,230 +1,209 @@
import { OpenAiNativeHandler } from '../openai-native';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';

// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
  return {
    __esModule: true,
    default: jest.fn().mockImplementation(() => ({
      chat: {
        completions: {
          create: mockCreate.mockImplementation(async (options) => {
            if (!options.stream) {
              return {
                id: 'test-completion',
                choices: [{
                  message: { role: 'assistant', content: 'Test response' },
                  finish_reason: 'stop',
                  index: 0
                }],
                usage: {
                  prompt_tokens: 10,
                  completion_tokens: 5,
                  total_tokens: 15
                }
              };
            }

            return {
              [Symbol.asyncIterator]: async function* () {
                yield {
                  choices: [{
                    delta: { content: 'Test response' },
                    index: 0
                  }],
                  usage: null
                };
                yield {
                  choices: [{
                    delta: {},
                    index: 0
                  }],
                  usage: {
                    prompt_tokens: 10,
                    completion_tokens: 5,
                    total_tokens: 15
                  }
                };
              }
            };
          })
        }
      }
    }))
  };
});

describe('OpenAiNativeHandler', () => {
  let handler: OpenAiNativeHandler;
  let mockOptions: ApiHandlerOptions;

  beforeEach(() => {
    mockOptions = {
      apiModelId: 'gpt-4o',
      openAiNativeApiKey: 'test-api-key'
    };
    handler = new OpenAiNativeHandler(mockOptions);
    mockCreate.mockClear();
  });

  describe('constructor', () => {
    it('should initialize with provided options', () => {
      expect(handler).toBeInstanceOf(OpenAiNativeHandler);
      expect(handler.getModel().id).toBe(mockOptions.apiModelId);
    });

    it('should initialize with empty API key', () => {
      const handlerWithoutKey = new OpenAiNativeHandler({
        apiModelId: 'gpt-4o',
        openAiNativeApiKey: ''
      });
      expect(handlerWithoutKey).toBeInstanceOf(OpenAiNativeHandler);
    });
  });

  describe('createMessage', () => {
    const systemPrompt = 'You are a helpful assistant.';
    const messages: Anthropic.Messages.MessageParam[] = [
      {
        role: 'user',
        content: 'Hello!'
      }
    ];

    it('should handle streaming responses', async () => {
      const stream = handler.createMessage(systemPrompt, messages);
      const chunks: any[] = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }

      expect(chunks.length).toBeGreaterThan(0);
      const textChunks = chunks.filter(chunk => chunk.type === 'text');
      expect(textChunks).toHaveLength(1);
      expect(textChunks[0].text).toBe('Test response');
    });

    it('should handle API errors', async () => {
      mockCreate.mockRejectedValueOnce(new Error('API Error'));

      const stream = handler.createMessage(systemPrompt, messages);

      await expect(async () => {
        for await (const chunk of stream) {
          // Should not reach here
        }
      }).rejects.toThrow('API Error');
    });
  });

  describe('completePrompt', () => {
    it('should complete prompt successfully with gpt-4o model', async () => {
      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('Test response');
      expect(mockCreate).toHaveBeenCalledWith({
        model: 'gpt-4o',
        messages: [{ role: 'user', content: 'Test prompt' }],
        temperature: 0
      });
    });

    it('should complete prompt successfully with o1 model', async () => {
      handler = new OpenAiNativeHandler({
        apiModelId: 'o1',
        openAiNativeApiKey: 'test-api-key'
      });

      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('Test response');
      expect(mockCreate).toHaveBeenCalledWith({
        model: 'o1',
        messages: [{ role: 'user', content: 'Test prompt' }]
      });
    });

    it('should complete prompt successfully with o1-preview model', async () => {
      handler = new OpenAiNativeHandler({
        apiModelId: 'o1-preview',
        openAiNativeApiKey: 'test-api-key'
      });

      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('Test response');
      expect(mockCreate).toHaveBeenCalledWith({
        model: 'o1-preview',
        messages: [{ role: 'user', content: 'Test prompt' }]
      });
    });

    it('should complete prompt successfully with o1-mini model', async () => {
      handler = new OpenAiNativeHandler({
        apiModelId: 'o1-mini',
        openAiNativeApiKey: 'test-api-key'
      });

      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('Test response');
      expect(mockCreate).toHaveBeenCalledWith({
        model: 'o1-mini',
        messages: [{ role: 'user', content: 'Test prompt' }]
      });
    });

    it('should handle API errors', async () => {
      mockCreate.mockRejectedValueOnce(new Error('API Error'));
      await expect(handler.completePrompt('Test prompt'))
        .rejects.toThrow('OpenAI Native completion error: API Error');
    });

    it('should handle empty response', async () => {
      mockCreate.mockResolvedValueOnce({
        choices: [{ message: { content: '' } }]
      });
      const result = await handler.completePrompt('Test prompt');
      expect(result).toBe('');
    });
  });

  describe('getModel', () => {
    it('should return model info', () => {
      const modelInfo = handler.getModel();
      expect(modelInfo.id).toBe(mockOptions.apiModelId);
      expect(modelInfo.info).toBeDefined();
      expect(modelInfo.info.maxTokens).toBe(4096);
      expect(modelInfo.info.contextWindow).toBe(128_000);
    });

    it('should handle undefined model ID', () => {
      const handlerWithoutModel = new OpenAiNativeHandler({
        openAiNativeApiKey: 'test-api-key'
      });
      const modelInfo = handlerWithoutModel.getModel();
      expect(modelInfo.id).toBe('gpt-4o'); // Default model
      expect(modelInfo.info).toBeDefined();
    });
  });
});
OpenAiHandler tests:

@@ -176,6 +176,32 @@ describe('OpenAiHandler', () => {
   });
 });

+describe('completePrompt', () => {
+  it('should complete prompt successfully', async () => {
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('Test response');
+    expect(mockCreate).toHaveBeenCalledWith({
+      model: mockOptions.openAiModelId,
+      messages: [{ role: 'user', content: 'Test prompt' }],
+      temperature: 0
+    });
+  });
+
+  it('should handle API errors', async () => {
+    mockCreate.mockRejectedValueOnce(new Error('API Error'));
+    await expect(handler.completePrompt('Test prompt'))
+      .rejects.toThrow('OpenAI completion error: API Error');
+  });
+
+  it('should handle empty response', async () => {
+    mockCreate.mockImplementationOnce(() => ({
+      choices: [{ message: { content: '' } }]
+    }));
+    const result = await handler.completePrompt('Test prompt');
+    expect(result).toBe('');
+  });
+});
+
 describe('getModel', () => {
   it('should return model info with sane defaults', () => {
     const model = handler.getModel();
@@ -6,7 +6,42 @@ import { AnthropicVertex } from '@anthropic-ai/vertex-sdk';
 jest.mock('@anthropic-ai/vertex-sdk', () => ({
   AnthropicVertex: jest.fn().mockImplementation(() => ({
     messages: {
-      create: jest.fn()
+      create: jest.fn().mockImplementation(async (options) => {
+        if (!options.stream) {
+          return {
+            id: 'test-completion',
+            content: [
+              { type: 'text', text: 'Test response' }
+            ],
+            role: 'assistant',
+            model: options.model,
+            usage: {
+              input_tokens: 10,
+              output_tokens: 5
+            }
+          }
+        }
+        return {
+          async *[Symbol.asyncIterator]() {
+            yield {
+              type: 'message_start',
+              message: {
+                usage: {
+                  input_tokens: 10,
+                  output_tokens: 5
+                }
+              }
+            }
+            yield {
+              type: 'content_block_start',
+              content_block: {
+                type: 'text',
+                text: 'Test response'
+              }
+            }
+          }
+        }
+      })
     }
   }))
 }));
@@ -196,6 +231,49 @@ describe('VertexHandler', () => {
     });
   });

+  describe('completePrompt', () => {
+    it('should complete prompt successfully', async () => {
+      const result = await handler.completePrompt('Test prompt');
+      expect(result).toBe('Test response');
+      expect(handler['client'].messages.create).toHaveBeenCalledWith({
+        model: 'claude-3-5-sonnet-v2@20241022',
+        max_tokens: 8192,
+        temperature: 0,
+        messages: [{ role: 'user', content: 'Test prompt' }],
+        stream: false
+      });
+    });
+
+    it('should handle API errors', async () => {
+      const mockError = new Error('Vertex API error');
+      const mockCreate = jest.fn().mockRejectedValue(mockError);
+      (handler['client'].messages as any).create = mockCreate;
+
+      await expect(handler.completePrompt('Test prompt'))
+        .rejects.toThrow('Vertex completion error: Vertex API error');
+    });
+
+    it('should handle non-text content', async () => {
+      const mockCreate = jest.fn().mockResolvedValue({
+        content: [{ type: 'image' }]
+      });
+      (handler['client'].messages as any).create = mockCreate;
+
+      const result = await handler.completePrompt('Test prompt');
+      expect(result).toBe('');
+    });
+
+    it('should handle empty response', async () => {
+      const mockCreate = jest.fn().mockResolvedValue({
+        content: [{ type: 'text', text: '' }]
+      });
+      (handler['client'].messages as any).create = mockCreate;
+
+      const result = await handler.completePrompt('Test prompt');
+      expect(result).toBe('');
+    });
+  });
+
   describe('getModel', () => {
     it('should return correct model info', () => {
       const modelInfo = handler.getModel();
@@ -7,10 +7,10 @@ import {
   ApiHandlerOptions,
   ModelInfo,
 } from "../../shared/api"
-import { ApiHandler } from "../index"
+import { ApiHandler, SingleCompletionHandler } from "../index"
 import { ApiStream } from "../transform/stream"

-export class AnthropicHandler implements ApiHandler {
+export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
   private options: ApiHandlerOptions
   private client: Anthropic

@@ -173,4 +173,27 @@ export class AnthropicHandler implements ApiHandler {
     }
     return { id: anthropicDefaultModelId, info: anthropicModels[anthropicDefaultModelId] }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const response = await this.client.messages.create({
+        model: this.getModel().id,
+        max_tokens: this.getModel().info.maxTokens || 8192,
+        temperature: 0,
+        messages: [{ role: "user", content: prompt }],
+        stream: false
+      })
+
+      const content = response.content[0]
+      if (content.type === 'text') {
+        return content.text
+      }
+      return ''
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Anthropic completion error: ${error.message}`)
+      }
+      throw error
+    }
+  }
 }
@@ -1,6 +1,6 @@
-import { BedrockRuntimeClient, ConverseStreamCommand, BedrockRuntimeClientConfig } from "@aws-sdk/client-bedrock-runtime"
+import { BedrockRuntimeClient, ConverseStreamCommand, ConverseCommand, BedrockRuntimeClientConfig } from "@aws-sdk/client-bedrock-runtime"
 import { Anthropic } from "@anthropic-ai/sdk"
-import { ApiHandler } from "../"
+import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, BedrockModelId, ModelInfo, bedrockDefaultModelId, bedrockModels } from "../../shared/api"
 import { ApiStream } from "../transform/stream"
 import { convertToBedrockConverseMessages, convertToAnthropicMessage } from "../transform/bedrock-converse-format"
@@ -38,7 +38,7 @@ export interface StreamEvent {
   };
 }

-export class AwsBedrockHandler implements ApiHandler {
+export class AwsBedrockHandler implements ApiHandler, SingleCompletionHandler {
   private options: ApiHandlerOptions
   private client: BedrockRuntimeClient

@@ -219,4 +219,63 @@ export class AwsBedrockHandler implements ApiHandler {
       info: bedrockModels[bedrockDefaultModelId]
     }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const modelConfig = this.getModel()
+
+      // Handle cross-region inference
+      let modelId: string
+      if (this.options.awsUseCrossRegionInference) {
+        let regionPrefix = (this.options.awsRegion || "").slice(0, 3)
+        switch (regionPrefix) {
+          case "us-":
+            modelId = `us.${modelConfig.id}`
+            break
+          case "eu-":
+            modelId = `eu.${modelConfig.id}`
+            break
+          default:
+            modelId = modelConfig.id
+            break
+        }
+      } else {
+        modelId = modelConfig.id
+      }
+
+      const payload = {
+        modelId,
+        messages: convertToBedrockConverseMessages([{
+          role: "user",
+          content: prompt
+        }]),
+        inferenceConfig: {
+          maxTokens: modelConfig.info.maxTokens || 5000,
+          temperature: 0.3,
+          topP: 0.1
+        }
+      }
+
+      const command = new ConverseCommand(payload)
+      const response = await this.client.send(command)
+
+      if (response.output && response.output instanceof Uint8Array) {
+        try {
+          const outputStr = new TextDecoder().decode(response.output)
+          const output = JSON.parse(outputStr)
+          if (output.content) {
+            return output.content
+          }
+        } catch (parseError) {
+          console.error('Failed to parse Bedrock response:', parseError)
+        }
+      }
+      return ''
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Bedrock completion error: ${error.message}`)
+      }
+      throw error
+    }
+  }
 }
@@ -1,11 +1,11 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { GoogleGenerativeAI } from "@google/generative-ai"
-import { ApiHandler } from "../"
+import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, geminiDefaultModelId, GeminiModelId, geminiModels, ModelInfo } from "../../shared/api"
 import { convertAnthropicMessageToGemini } from "../transform/gemini-format"
 import { ApiStream } from "../transform/stream"

-export class GeminiHandler implements ApiHandler {
+export class GeminiHandler implements ApiHandler, SingleCompletionHandler {
   private options: ApiHandlerOptions
   private client: GoogleGenerativeAI

@@ -53,4 +53,26 @@ export class GeminiHandler implements ApiHandler {
     }
     return { id: geminiDefaultModelId, info: geminiModels[geminiDefaultModelId] }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const model = this.client.getGenerativeModel({
+        model: this.getModel().id,
+      })
+
+      const result = await model.generateContent({
+        contents: [{ role: "user", parts: [{ text: prompt }] }],
+        generationConfig: {
+          temperature: 0,
+        },
+      })
+
+      return result.response.text()
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Gemini completion error: ${error.message}`)
+      }
+      throw error
+    }
+  }
 }
@@ -1,13 +1,13 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import axios from "axios"
 import OpenAI from "openai"
-import { ApiHandler } from "../"
+import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, glamaDefaultModelId, glamaDefaultModelInfo } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"
 import delay from "delay"

-export class GlamaHandler implements ApiHandler {
+export class GlamaHandler implements ApiHandler, SingleCompletionHandler {
   private options: ApiHandlerOptions
   private client: OpenAI

@@ -129,4 +129,26 @@ export class GlamaHandler implements ApiHandler {

     return { id: glamaDefaultModelId, info: glamaDefaultModelInfo }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+        model: this.getModel().id,
+        messages: [{ role: "user", content: prompt }],
+        temperature: 0,
+      }
+
+      if (this.getModel().id.startsWith("anthropic/")) {
+        requestOptions.max_tokens = 8192
+      }
+
+      const response = await this.client.chat.completions.create(requestOptions)
+      return response.choices[0]?.message.content || ""
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Glama completion error: ${error.message}`)
+      }
+      throw error
+    }
+  }
 }
@@ -1,11 +1,11 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
-import { ApiHandler } from "../"
+import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"

-export class LmStudioHandler implements ApiHandler {
+export class LmStudioHandler implements ApiHandler, SingleCompletionHandler {
   private options: ApiHandlerOptions
   private client: OpenAI

@@ -53,4 +53,20 @@ export class LmStudioHandler implements ApiHandler {
       info: openAiModelInfoSaneDefaults,
     }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const response = await this.client.chat.completions.create({
+        model: this.getModel().id,
+        messages: [{ role: "user", content: prompt }],
+        temperature: 0,
+        stream: false
+      })
+      return response.choices[0]?.message.content || ""
+    } catch (error) {
+      throw new Error(
+        "Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Cline's prompts.",
+      )
+    }
+  }
 }
@@ -1,11 +1,11 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
-import { ApiHandler } from "../"
+import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"

-export class OllamaHandler implements ApiHandler {
+export class OllamaHandler implements ApiHandler, SingleCompletionHandler {
   private options: ApiHandlerOptions
   private client: OpenAI

@@ -46,4 +46,21 @@ export class OllamaHandler implements ApiHandler {
       info: openAiModelInfoSaneDefaults,
     }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const response = await this.client.chat.completions.create({
+        model: this.getModel().id,
+        messages: [{ role: "user", content: prompt }],
+        temperature: 0,
+        stream: false
+      })
+      return response.choices[0]?.message.content || ""
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Ollama completion error: ${error.message}`)
+      }
+      throw error
+    }
+  }
 }
@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
-import { ApiHandler } from "../"
+import { ApiHandler, SingleCompletionHandler } from "../"
 import {
   ApiHandlerOptions,
   ModelInfo,
@@ -11,7 +11,7 @@ import {
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"

-export class OpenAiNativeHandler implements ApiHandler {
+export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler {
   private options: ApiHandlerOptions
   private client: OpenAI

@@ -83,4 +83,37 @@ export class OpenAiNativeHandler implements ApiHandler {
     }
     return { id: openAiNativeDefaultModelId, info: openAiNativeModels[openAiNativeDefaultModelId] }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const modelId = this.getModel().id
+      let requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
+
+      switch (modelId) {
+        case "o1":
+        case "o1-preview":
+        case "o1-mini":
+          // o1 doesn't support non-1 temp or system prompt
+          requestOptions = {
+            model: modelId,
+            messages: [{ role: "user", content: prompt }]
+          }
+          break
+        default:
+          requestOptions = {
+            model: modelId,
+            messages: [{ role: "user", content: prompt }],
+            temperature: 0
+          }
+      }
+
+      const response = await this.client.chat.completions.create(requestOptions)
+      return response.choices[0]?.message.content || ""
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`OpenAI Native completion error: ${error.message}`)
+      }
+      throw error
+    }
+  }
 }
@@ -6,11 +6,11 @@ import {
   ModelInfo,
   openAiModelInfoSaneDefaults,
 } from "../../shared/api"
-import { ApiHandler } from "../index"
+import { ApiHandler, SingleCompletionHandler } from "../index"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"

-export class OpenAiHandler implements ApiHandler {
+export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
   protected options: ApiHandlerOptions
   private client: OpenAI

@@ -100,4 +100,22 @@ export class OpenAiHandler implements ApiHandler {
       info: openAiModelInfoSaneDefaults,
     }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+        model: this.getModel().id,
+        messages: [{ role: "user", content: prompt }],
+        temperature: 0,
+      }
+
+      const response = await this.client.chat.completions.create(requestOptions)
+      return response.choices[0]?.message.content || ""
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`OpenAI completion error: ${error.message}`)
+      }
+      throw error
+    }
+  }
 }
@@ -1,11 +1,11 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { AnthropicVertex } from "@anthropic-ai/vertex-sdk"
-import { ApiHandler } from "../"
+import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, vertexDefaultModelId, VertexModelId, vertexModels } from "../../shared/api"
 import { ApiStream } from "../transform/stream"

 // https://docs.anthropic.com/en/api/claude-on-vertex-ai
-export class VertexHandler implements ApiHandler {
+export class VertexHandler implements ApiHandler, SingleCompletionHandler {
   private options: ApiHandlerOptions
   private client: AnthropicVertex

@@ -83,4 +83,27 @@ export class VertexHandler implements ApiHandler {
     }
     return { id: vertexDefaultModelId, info: vertexModels[vertexDefaultModelId] }
   }
+
+  async completePrompt(prompt: string): Promise<string> {
+    try {
+      const response = await this.client.messages.create({
+        model: this.getModel().id,
+        max_tokens: this.getModel().info.maxTokens || 8192,
+        temperature: 0,
+        messages: [{ role: "user", content: prompt }],
+        stream: false
+      })
+
+      const content = response.content[0]
+      if (content.type === 'text') {
+        return content.text
+      }
+      return ''
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Vertex completion error: ${error.message}`)
+      }
+      throw error
+    }
+  }
 }
@@ -780,15 +780,24 @@ export class Cline {
       })
     }

-    const { browserViewportSize, preferredLanguage, mode } = await this.providerRef.deref()?.getState() ?? {}
+    const { browserViewportSize, preferredLanguage, mode, customPrompts } = await this.providerRef.deref()?.getState() ?? {}
     const systemPrompt = await SYSTEM_PROMPT(
       cwd,
       this.api.getModel().info.supportsComputerUse ?? false,
       mcpHub,
       this.diffStrategy,
       browserViewportSize,
+      mode,
+      customPrompts
+    ) + await addCustomInstructions(
+      {
+        customInstructions: this.customInstructions,
+        customPrompts,
+        preferredLanguage
+      },
+      cwd,
       mode
-    ) + await addCustomInstructions(this.customInstructions ?? '', cwd, preferredLanguage)
+    )

     // If the previous API request's total token usage is close to the context window, truncate the conversation history to free up space for the new request
     if (previousApiReqIndex >= 0) {
@@ -131,7 +131,7 @@ Detailed commit message with multiple lines
     await openMention("/path/to/file")
     expect(mockExecuteCommand).not.toHaveBeenCalled()
     expect(mockOpenExternal).not.toHaveBeenCalled()
-    expect(mockShowErrorMessage).toHaveBeenCalledWith("Could not open file!")
+    expect(mockShowErrorMessage).toHaveBeenCalledWith("Could not open file: File does not exist")

     await openMention("problems")
     expect(mockExecuteCommand).toHaveBeenCalledWith("workbench.actions.view.problems")
@@ -2185,6 +2185,66 @@ Custom test instructions
 2. Second rule"
 `;
+
+exports[`addCustomInstructions should combine global and mode-specific instructions 1`] = `
+"
+====
+
+USER'S CUSTOM INSTRUCTIONS
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.
+
+Global instructions
+
+Mode-specific instructions
+
+# Rules from .clinerules:
+# Test Rules
+1. First rule
+2. Second rule"
+`;
+
+exports[`addCustomInstructions should fall back to generic rules when mode-specific rules not found 1`] = `
+"
+====
+
+USER'S CUSTOM INSTRUCTIONS
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.
+
+# Rules from .clinerules:
+# Test Rules
+1. First rule
+2. Second rule"
+`;
+
+exports[`addCustomInstructions should handle empty mode-specific instructions 1`] = `
+"
+====
+
+USER'S CUSTOM INSTRUCTIONS
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.
+
+# Rules from .clinerules:
+# Test Rules
+1. First rule
+2. Second rule"
+`;
+
+exports[`addCustomInstructions should handle undefined mode-specific instructions 1`] = `
+"
+====
+
+USER'S CUSTOM INSTRUCTIONS
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.
+
+# Rules from .clinerules:
+# Test Rules
+1. First rule
+2. Second rule"
+`;
+
 exports[`addCustomInstructions should include custom instructions when provided 1`] = `
 "
 ====

@@ -2217,7 +2277,7 @@ You should always speak and think in the Spanish language.
 2. Second rule"
 `;

-exports[`addCustomInstructions should include rules from .clinerules 1`] = `
+exports[`addCustomInstructions should prioritize mode-specific instructions after global ones 1`] = `
 "
 ====

@@ -2225,6 +2285,80 @@ USER'S CUSTOM INSTRUCTIONS

 The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.

+First instruction
+
+Second instruction
+
+# Rules from .clinerules:
+# Test Rules
+1. First rule
+2. Second rule"
+`;
+
+exports[`addCustomInstructions should prioritize mode-specific rules for architect mode 1`] = `
+"
+====
+
+USER'S CUSTOM INSTRUCTIONS
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.
+
+# Rules from .clinerules-architect:
+# Architect Mode Rules
+1. Architect specific rule
+
+# Rules from .clinerules:
+# Test Rules
+1. First rule
+2. Second rule"
+`;
+
+exports[`addCustomInstructions should prioritize mode-specific rules for ask mode 1`] = `
+"
+====
+
+USER'S CUSTOM INSTRUCTIONS
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.
+
+# Rules from .clinerules-ask:
+# Ask Mode Rules
+1. Ask specific rule
+
+# Rules from .clinerules:
+# Test Rules
+1. First rule
+2. Second rule"
+`;
+
+exports[`addCustomInstructions should prioritize mode-specific rules for code mode 1`] = `
+"
+====
+
+USER'S CUSTOM INSTRUCTIONS
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.
+
+# Rules from .clinerules-code:
+# Code Mode Rules
+1. Code specific rule
+
+# Rules from .clinerules:
+# Test Rules
+1. First rule
+2. Second rule"
+`;
+
+exports[`addCustomInstructions should trim mode-specific instructions 1`] = `
+"
+====
+
+USER'S CUSTOM INSTRUCTIONS
+
+The following additional instructions are provided by the user, and should be followed to the best of your ability without interfering with the TOOL USE guidelines.
+
+Custom mode instructions
+
 # Rules from .clinerules:
 # Test Rules
 1. First rule
@@ -5,6 +5,7 @@ import { ClineProvider } from '../../../core/webview/ClineProvider'
 import { SearchReplaceDiffStrategy } from '../../../core/diff/strategies/search-replace'
 import fs from 'fs/promises'
 import os from 'os'
+import { codeMode, askMode, architectMode } from '../modes'
 // Import path utils to get access to toPosix string extension
 import '../../../utils/path'

@@ -18,13 +19,22 @@ jest.mock('default-shell', () => '/bin/bash')

 jest.mock('os-name', () => () => 'Linux')

-// Mock fs.readFile to return empty mcpServers config and mock .clinerules
+// Mock fs.readFile to return empty mcpServers config and mock rules files
 jest.mock('fs/promises', () => ({
   ...jest.requireActual('fs/promises'),
   readFile: jest.fn().mockImplementation(async (path: string) => {
     if (path.endsWith('mcpSettings.json')) {
       return '{"mcpServers": {}}'
     }
+    if (path.endsWith('.clinerules-code')) {
+      return '# Code Mode Rules\n1. Code specific rule'
+    }
+    if (path.endsWith('.clinerules-ask')) {
+      return '# Ask Mode Rules\n1. Ask specific rule'
+    }
+    if (path.endsWith('.clinerules-architect')) {
+      return '# Architect Mode Rules\n1. Architect specific rule'
+    }
     if (path.endsWith('.clinerules')) {
       return '# Test Rules\n1. First rule\n2. Second rule'
     }
@@ -159,42 +169,149 @@ describe('addCustomInstructions', () => {
     jest.clearAllMocks()
   })

-  it('should include preferred language when provided', async () => {
-    const result = await addCustomInstructions(
-      '',
+  it('should prioritize mode-specific rules for code mode', async () => {
+    const instructions = await addCustomInstructions(
+      {},
       '/test/path',
-      'Spanish'
+      codeMode
+    )
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should prioritize mode-specific rules for ask mode', async () => {
+    const instructions = await addCustomInstructions(
+      {},
+      '/test/path',
+      askMode
+    )
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should prioritize mode-specific rules for architect mode', async () => {
+    const instructions = await addCustomInstructions(
+      {},
+      '/test/path',
+      architectMode
     )

-    expect(result).toMatchSnapshot()
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should fall back to generic rules when mode-specific rules not found', async () => {
+    // Mock readFile to return ENOENT for mode-specific file
+    const mockReadFile = jest.fn().mockImplementation(async (path: string) => {
+      if (path.endsWith('.clinerules-code')) {
+        const error = new Error('ENOENT') as NodeJS.ErrnoException
+        error.code = 'ENOENT'
+        throw error
+      }
+      if (path.endsWith('.clinerules')) {
+        return '# Test Rules\n1. First rule\n2. Second rule'
+      }
+      return ''
+    })
+    jest.spyOn(fs, 'readFile').mockImplementation(mockReadFile)
+
+    const instructions = await addCustomInstructions(
+      {},
+      '/test/path',
+      codeMode
+    )
+
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should include preferred language when provided', async () => {
+    const instructions = await addCustomInstructions(
+      { preferredLanguage: 'Spanish' },
+      '/test/path',
+      codeMode
+    )
+
+    expect(instructions).toMatchSnapshot()
   })

   it('should include custom instructions when provided', async () => {
-    const result = await addCustomInstructions(
-      'Custom test instructions',
+    const instructions = await addCustomInstructions(
+      { customInstructions: 'Custom test instructions' },
       '/test/path'
     )

-    expect(result).toMatchSnapshot()
-  })
-
-  it('should include rules from .clinerules', async () => {
-    const result = await addCustomInstructions(
-      '',
-      '/test/path'
-    )
-
-    expect(result).toMatchSnapshot()
+    expect(instructions).toMatchSnapshot()
   })

   it('should combine all custom instructions', async () => {
-    const result = await addCustomInstructions(
-      'Custom test instructions',
+    const instructions = await addCustomInstructions(
+      {
+        customInstructions: 'Custom test instructions',
+        preferredLanguage: 'French'
+      },
       '/test/path',
-      'French'
+      codeMode
+    )
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should handle undefined mode-specific instructions', async () => {
+    const instructions = await addCustomInstructions(
+      {},
+      '/test/path'
     )

-    expect(result).toMatchSnapshot()
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should trim mode-specific instructions', async () => {
+    const instructions = await addCustomInstructions(
+      { customInstructions: ' Custom mode instructions ' },
+      '/test/path'
+    )
+
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should handle empty mode-specific instructions', async () => {
+    const instructions = await addCustomInstructions(
+      { customInstructions: '' },
+      '/test/path'
+    )
+
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should combine global and mode-specific instructions', async () => {
+    const instructions = await addCustomInstructions(
+      {
+        customInstructions: 'Global instructions',
+        customPrompts: {
+          code: { customInstructions: 'Mode-specific instructions' }
+        }
+      },
+      '/test/path',
+      codeMode
+    )
+
+    expect(instructions).toMatchSnapshot()
+  })
+
+  it('should prioritize mode-specific instructions after global ones', async () => {
+    const instructions = await addCustomInstructions(
+      {
+        customInstructions: 'First instruction',
+        customPrompts: {
+          code: { customInstructions: 'Second instruction' }
+        }
+      },
+      '/test/path',
+      codeMode
+    )
+
+    const instructionParts = instructions.split('\n\n')
+    const globalIndex = instructionParts.findIndex(part => part.includes('First instruction'))
+    const modeSpecificIndex = instructionParts.findIndex(part => part.includes('Second instruction'))
+
+    expect(globalIndex).toBeLessThan(modeSpecificIndex)
+    expect(instructions).toMatchSnapshot()
   })

   afterAll(() => {
@@ -1,4 +1,4 @@
-import { architectMode } from "./modes"
+import { architectMode, defaultPrompts, PromptComponent } from "../../shared/modes"
 import { getToolDescriptionsForMode } from "./tools"
 import {
   getRulesSection,
@@ -20,7 +20,8 @@ export const ARCHITECT_PROMPT = async (
   mcpHub?: McpHub,
   diffStrategy?: DiffStrategy,
   browserViewportSize?: string,
-) => `You are Cline, a software architecture expert specializing in analyzing codebases, identifying patterns, and providing high-level technical guidance. You excel at understanding complex systems, evaluating architectural decisions, and suggesting improvements while maintaining a read-only approach to the codebase. Make sure to help the user come up with a solid implementation plan for their project and don't rush to switch to implementing code.
+  customPrompt?: PromptComponent,
+) => `${customPrompt?.roleDefinition || defaultPrompts[architectMode].roleDefinition}

 ${getSharedToolUseSection()}

@@ -1,10 +1,9 @@
-import { Mode, askMode } from "./modes"
+import { Mode, askMode, defaultPrompts, PromptComponent } from "../../shared/modes"
 import { getToolDescriptionsForMode } from "./tools"
 import {
   getRulesSection,
   getSystemInfoSection,
   getObjectiveSection,
-  addCustomInstructions,
   getSharedToolUseSection,
   getMcpServersSection,
   getToolUseGuidelinesSection,
@@ -21,7 +20,8 @@ export const ASK_PROMPT = async (
   mcpHub?: McpHub,
   diffStrategy?: DiffStrategy,
   browserViewportSize?: string,
-) => `You are Cline, a knowledgeable technical assistant focused on answering questions and providing information about software development, technology, and related topics. You can analyze code, explain concepts, and access external resources while maintaining a read-only approach to the codebase. Make sure to answer the user's questions and don't rush to switch to implementing code.
+  customPrompt?: PromptComponent,
+) => `${customPrompt?.roleDefinition || defaultPrompts[askMode].roleDefinition}

 ${getSharedToolUseSection()}

@@ -1,10 +1,9 @@
-import { Mode, codeMode } from "./modes"
+import { Mode, codeMode, defaultPrompts, PromptComponent } from "../../shared/modes"
 import { getToolDescriptionsForMode } from "./tools"
 import {
   getRulesSection,
   getSystemInfoSection,
   getObjectiveSection,
-  addCustomInstructions,
   getSharedToolUseSection,
   getMcpServersSection,
   getToolUseGuidelinesSection,
@@ -21,7 +20,8 @@ export const CODE_PROMPT = async (
   mcpHub?: McpHub,
   diffStrategy?: DiffStrategy,
   browserViewportSize?: string,
-) => `You are Cline, a highly skilled software engineer with extensive knowledge in many programming languages, frameworks, design patterns, and best practices.
+  customPrompt?: PromptComponent,
+) => `${customPrompt?.roleDefinition || defaultPrompts[codeMode].roleDefinition}

 ${getSharedToolUseSection()}

@@ -4,14 +4,30 @@ import { CODE_PROMPT } from "./code"
 import { ARCHITECT_PROMPT } from "./architect"
 import { ASK_PROMPT } from "./ask"
 import { Mode, codeMode, architectMode, askMode } from "./modes"
+import { CustomPrompts } from "../../shared/modes"
 import fs from 'fs/promises'
 import path from 'path'

-async function loadRuleFiles(cwd: string): Promise<string> {
+async function loadRuleFiles(cwd: string, mode: Mode): Promise<string> {
-  const ruleFiles = ['.clinerules', '.cursorrules', '.windsurfrules']
   let combinedRules = ''

-  for (const file of ruleFiles) {
+  // First try mode-specific rules
+  const modeSpecificFile = `.clinerules-${mode}`
+  try {
+    const content = await fs.readFile(path.join(cwd, modeSpecificFile), 'utf-8')
+    if (content.trim()) {
+      combinedRules += `\n# Rules from ${modeSpecificFile}:\n${content.trim()}\n`
+    }
+  } catch (err) {
+    // Silently skip if file doesn't exist
+    if ((err as NodeJS.ErrnoException).code !== 'ENOENT') {
+      throw err
+    }
+  }
+
+  // Then try generic rules files
+  const genericRuleFiles = ['.clinerules']
+  for (const file of genericRuleFiles) {
     try {
       const content = await fs.readFile(path.join(cwd, file), 'utf-8')
       if (content.trim()) {
@@ -28,16 +44,30 @@ async function loadRuleFiles(cwd: string): Promise<string> {
   return combinedRules
 }

-export async function addCustomInstructions(customInstructions: string, cwd: string, preferredLanguage?: string): Promise<string> {
-  const ruleFileContent = await loadRuleFiles(cwd)
+interface State {
+  customInstructions?: string;
+  customPrompts?: CustomPrompts;
+  preferredLanguage?: string;
+}
+
+export async function addCustomInstructions(
+  state: State,
+  cwd: string,
+  mode: Mode = codeMode
+): Promise<string> {
+  const ruleFileContent = await loadRuleFiles(cwd, mode)
   const allInstructions = []

-  if (preferredLanguage) {
-    allInstructions.push(`You should always speak and think in the ${preferredLanguage} language.`)
+  if (state.preferredLanguage) {
+    allInstructions.push(`You should always speak and think in the ${state.preferredLanguage} language.`)
   }

-  if (customInstructions.trim()) {
-    allInstructions.push(customInstructions.trim())
+  if (state.customInstructions?.trim()) {
+    allInstructions.push(state.customInstructions.trim())
+  }
+
+  if (state.customPrompts?.[mode]?.customInstructions?.trim()) {
+    allInstructions.push(state.customPrompts[mode].customInstructions.trim())
   }

   if (ruleFileContent && ruleFileContent.trim()) {
@@ -58,20 +88,21 @@ ${joinedInstructions}`
 }

 export const SYSTEM_PROMPT = async (
   cwd: string,
   supportsComputerUse: boolean,
   mcpHub?: McpHub,
   diffStrategy?: DiffStrategy,
   browserViewportSize?: string,
   mode: Mode = codeMode,
+  customPrompts?: CustomPrompts,
 ) => {
   switch (mode) {
     case architectMode:
-      return ARCHITECT_PROMPT(cwd, supportsComputerUse, mcpHub, diffStrategy, browserViewportSize)
+      return ARCHITECT_PROMPT(cwd, supportsComputerUse, mcpHub, diffStrategy, browserViewportSize, customPrompts?.architect)
     case askMode:
-      return ASK_PROMPT(cwd, supportsComputerUse, mcpHub, diffStrategy, browserViewportSize)
+      return ASK_PROMPT(cwd, supportsComputerUse, mcpHub, diffStrategy, browserViewportSize, customPrompts?.ask)
     default:
-      return CODE_PROMPT(cwd, supportsComputerUse, mcpHub, diffStrategy, browserViewportSize)
+      return CODE_PROMPT(cwd, supportsComputerUse, mcpHub, diffStrategy, browserViewportSize, customPrompts?.code)
   }
 }

@@ -16,7 +16,9 @@ import { ApiConfiguration, ApiProvider, ModelInfo } from "../../shared/api"
 import { findLast } from "../../shared/array"
 import { ApiConfigMeta, ExtensionMessage } from "../../shared/ExtensionMessage"
 import { HistoryItem } from "../../shared/HistoryItem"
-import { WebviewMessage } from "../../shared/WebviewMessage"
+import { WebviewMessage, PromptMode } from "../../shared/WebviewMessage"
+import { defaultPrompts } from "../../shared/modes"
+import { SYSTEM_PROMPT, addCustomInstructions } from "../prompts/system"
 import { fileExistsAtPath } from "../../utils/fs"
 import { Cline } from "../Cline"
 import { openMention } from "../mentions"
@@ -28,7 +30,7 @@ import { enhancePrompt } from "../../utils/enhance-prompt"
 import { getCommitInfo, searchCommits, getWorkingState } from "../../utils/git"
 import { ConfigManager } from "../config/ConfigManager"
 import { Mode } from "../prompts/types"
-import { codeMode } from "../prompts/system"
+import { codeMode, CustomPrompts } from "../../shared/modes"

 /*
 https://github.com/microsoft/vscode-webview-ui-toolkit-samples/blob/main/default/weather-webview/src/providers/WeatherViewProvider.ts
@@ -93,6 +95,8 @@ type GlobalStateKey =
   | "listApiConfigMeta"
   | "mode"
   | "modeApiConfigs"
+  | "customPrompts"
+  | "enhancementApiConfigId"

 export const GlobalFileNames = {
   apiConversationHistory: "api_conversation_history.json",
@@ -111,7 +115,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
   private cline?: Cline
   private workspaceTracker?: WorkspaceTracker
   mcpHub?: McpHub
-  private latestAnnouncementId = "dec-10-2024" // update to some unique identifier when we add a new announcement
+  private latestAnnouncementId = "jan-13-2025-custom-prompt" // update to some unique identifier when we add a new announcement
   configManager: ConfigManager

   constructor(
@@ -242,15 +246,22 @@ export class ClineProvider implements vscode.WebviewViewProvider {
     await this.clearTask()
     const {
       apiConfiguration,
-      customInstructions,
+      customPrompts,
       diffEnabled,
-      fuzzyMatchThreshold
+      fuzzyMatchThreshold,
+      mode,
+      customInstructions: globalInstructions,
     } = await this.getState()

+    const modeInstructions = customPrompts?.[mode]?.customInstructions
+    const effectiveInstructions = [globalInstructions, modeInstructions]
+      .filter(Boolean)
+      .join('\n\n')
+
     this.cline = new Cline(
       this,
       apiConfiguration,
-      customInstructions,
+      effectiveInstructions,
       diffEnabled,
       fuzzyMatchThreshold,
       task,
@@ -262,15 +273,22 @@ export class ClineProvider implements vscode.WebviewViewProvider {
     await this.clearTask()
     const {
       apiConfiguration,
-      customInstructions,
+      customPrompts,
       diffEnabled,
-      fuzzyMatchThreshold
+      fuzzyMatchThreshold,
+      mode,
+      customInstructions: globalInstructions,
     } = await this.getState()

+    const modeInstructions = customPrompts?.[mode]?.customInstructions
+    const effectiveInstructions = [globalInstructions, modeInstructions]
+      .filter(Boolean)
+      .join('\n\n')
+
     this.cline = new Cline(
       this,
       apiConfiguration,
-      customInstructions,
+      effectiveInstructions,
       diffEnabled,
       fuzzyMatchThreshold,
       undefined,
@@ -375,6 +393,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
   async (message: WebviewMessage) => {
     switch (message.type) {
       case "webviewDidLaunch":
+
         this.postStateToWebview()
         this.workspaceTracker?.initializeFilePaths() // don't await
         getTheme().then((theme) =>
@@ -568,7 +587,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
         openImage(message.text!)
         break
       case "openFile":
-        openFile(message.text!)
+        openFile(message.text!, message.values as { create?: boolean; content?: string })
         break
      case "openMention":
         openMention(message.text)
@@ -727,6 +746,56 @@ export class ClineProvider implements vscode.WebviewViewProvider {

await this.postStateToWebview()
break
+case "updateEnhancedPrompt":
+const existingPrompts = await this.getGlobalState("customPrompts") || {}
+
+const updatedPrompts = {
+...existingPrompts,
+enhance: message.text
+}
+
+await this.updateGlobalState("customPrompts", updatedPrompts)
+
+// Get current state and explicitly include customPrompts
+const currentState = await this.getState()
+
+const stateWithPrompts = {
+...currentState,
+customPrompts: updatedPrompts
+}
+
+// Post state with prompts
+this.view?.webview.postMessage({
+type: "state",
+state: stateWithPrompts
+})
+break
+case "updatePrompt":
+if (message.promptMode && message.customPrompt !== undefined) {
+const existingPrompts = await this.getGlobalState("customPrompts") || {}
+
+const updatedPrompts = {
+...existingPrompts,
+[message.promptMode]: message.customPrompt
+}
+
+await this.updateGlobalState("customPrompts", updatedPrompts)
+
+// Get current state and explicitly include customPrompts
+const currentState = await this.getState()
+
+const stateWithPrompts = {
+...currentState,
+customPrompts: updatedPrompts
+}
+
+// Post state with prompts
+this.view?.webview.postMessage({
+type: "state",
+state: stateWithPrompts
+})
+}
+break
case "deleteMessage": {
const answer = await vscode.window.showInformationMessage(
"What would you like to delete?",
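For orientation, a hedged sketch of the messages a webview could post to reach the two new cases above. The type and field names come from the WebviewMessage changes later in this diff; the literal values are illustrative only, and the `vscode` wrapper from `webview-ui/src/utils/vscode` is assumed to expose `postMessage` as it does elsewhere in the webview code.

```ts
import { vscode } from "./utils/vscode"

// Handled by the "updateEnhancedPrompt" case: stores the text under customPrompts.enhance.
vscode.postMessage({ type: "updateEnhancedPrompt", text: "Rewrite the prompt so it is specific and testable:" })

// Handled by the "updatePrompt" case: stores a per-mode PromptComponent.
vscode.postMessage({
	type: "updatePrompt",
	promptMode: "architect",
	customPrompt: { customInstructions: "Weigh scalability trade-offs in every recommendation." },
})
```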
@@ -797,16 +866,28 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("screenshotQuality", message.value)
await this.postStateToWebview()
break
+case "enhancementApiConfigId":
+await this.updateGlobalState("enhancementApiConfigId", message.text)
+await this.postStateToWebview()
+break
case "enhancePrompt":
if (message.text) {
try {
-const { apiConfiguration } = await this.getState()
+const { apiConfiguration, customPrompts, listApiConfigMeta, enhancementApiConfigId } = await this.getState()
-const enhanceConfig = {
-...apiConfiguration,
-apiProvider: "openrouter" as const,
-openRouterModelId: "gpt-4o",
+// Try to get enhancement config first, fall back to current config
+let configToUse: ApiConfiguration = apiConfiguration
+if (enhancementApiConfigId) {
+const config = listApiConfigMeta?.find(c => c.id === enhancementApiConfigId)
+if (config?.name) {
+const loadedConfig = await this.configManager.LoadConfig(config.name)
+if (loadedConfig.apiProvider) {
+configToUse = loadedConfig
+}
+}
}
-const enhancedPrompt = await enhancePrompt(enhanceConfig, message.text)
+const enhancedPrompt = await enhancePrompt(configToUse, message.text, customPrompts?.enhance)
await this.postMessageToWebview({
type: "enhancedPrompt",
text: enhancedPrompt
@@ -814,11 +895,45 @@ export class ClineProvider implements vscode.WebviewViewProvider {
} catch (error) {
console.error("Error enhancing prompt:", error)
vscode.window.showErrorMessage("Failed to enhance prompt")
+await this.postMessageToWebview({
+type: "enhancedPrompt"
+})
}
}
break
+case "getSystemPrompt":
+try {
+const { apiConfiguration, customPrompts, customInstructions, preferredLanguage, browserViewportSize, mcpEnabled } = await this.getState()
+const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0) || ''
+
+const mode = message.mode ?? codeMode
+const instructions = await addCustomInstructions(
+{ customInstructions, customPrompts, preferredLanguage },
+cwd,
+mode
+)
+
+const systemPrompt = await SYSTEM_PROMPT(
+cwd,
+apiConfiguration.openRouterModelInfo?.supportsComputerUse ?? false,
+mcpEnabled ? this.mcpHub : undefined,
+undefined,
+browserViewportSize ?? "900x600",
+mode,
+customPrompts
+)
+const fullPrompt = instructions ? `${systemPrompt}${instructions}` : systemPrompt
+
+await this.postMessageToWebview({
+type: "systemPrompt",
+text: fullPrompt,
+mode: message.mode
+})
+} catch (error) {
+console.error("Error getting system prompt:", error)
+vscode.window.showErrorMessage("Failed to get system prompt")
+}
+break
case "searchCommits": {
const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0)
if (cwd) {
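A hedged sketch of the round trip behind the new "getSystemPrompt" case, as a prompt-preview UI might use it. The message shapes follow the ExtensionMessage and WebviewMessage changes further down in this diff; the listener wiring and logging are illustrative assumptions, not code from the repository.

```ts
import { vscode } from "./utils/vscode"

// Ask the extension to assemble the system prompt for a given mode.
vscode.postMessage({ type: "getSystemPrompt", mode: "architect" })

// The extension answers with a "systemPrompt" message carrying the full text.
window.addEventListener("message", (event: MessageEvent) => {
	const message = event.data
	if (message.type === "systemPrompt") {
		console.log(`System prompt preview (${message.mode}):`, message.text)
	}
})
```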
@@ -1482,6 +1597,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
currentApiConfigName,
listApiConfigMeta,
mode,
+customPrompts,
+enhancementApiConfigId,
} = await this.getState()

const allowedCommands = vscode.workspace
@@ -1500,11 +1617,11 @@ export class ClineProvider implements vscode.WebviewViewProvider {
uriScheme: vscode.env.uriScheme,
clineMessages: this.cline?.clineMessages || [],
taskHistory: (taskHistory || [])
-.filter((item) => item.ts && item.task)
+.filter((item: HistoryItem) => item.ts && item.task)
-.sort((a, b) => b.ts - a.ts),
+.sort((a: HistoryItem, b: HistoryItem) => b.ts - a.ts),
soundEnabled: soundEnabled ?? false,
diffEnabled: diffEnabled ?? true,
-shouldShowAnnouncement: false, // lastShownAnnouncementId !== this.latestAnnouncementId,
+shouldShowAnnouncement: lastShownAnnouncementId !== this.latestAnnouncementId,
allowedCommands,
soundVolume: soundVolume ?? 0.5,
browserViewportSize: browserViewportSize ?? "900x600",
@@ -1519,6 +1636,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
currentApiConfigName: currentApiConfigName ?? "default",
listApiConfigMeta: listApiConfigMeta ?? [],
mode: mode ?? codeMode,
+customPrompts: customPrompts ?? {},
+enhancementApiConfigId,
}
}

@@ -1630,6 +1749,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
listApiConfigMeta,
mode,
modeApiConfigs,
+customPrompts,
+enhancementApiConfigId,
] = await Promise.all([
this.getGlobalState("apiProvider") as Promise<ApiProvider | undefined>,
this.getGlobalState("apiModelId") as Promise<string | undefined>,
@@ -1686,6 +1807,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
this.getGlobalState("listApiConfigMeta") as Promise<ApiConfigMeta[] | undefined>,
this.getGlobalState("mode") as Promise<Mode | undefined>,
this.getGlobalState("modeApiConfigs") as Promise<Record<Mode, string> | undefined>,
+this.getGlobalState("customPrompts") as Promise<CustomPrompts | undefined>,
+this.getGlobalState("enhancementApiConfigId") as Promise<string | undefined>,
])

let apiProvider: ApiProvider
@@ -1786,6 +1909,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
currentApiConfigName: currentApiConfigName ?? "default",
listApiConfigMeta: listApiConfigMeta ?? [],
modeApiConfigs: modeApiConfigs ?? {} as Record<Mode, string>,
+customPrompts: customPrompts ?? {},
+enhancementApiConfigId,
}
}

@@ -62,6 +62,7 @@ jest.mock('vscode', () => ({
},
window: {
showInformationMessage: jest.fn(),
+showErrorMessage: jest.fn(),
},
workspace: {
getConfiguration: jest.fn().mockReturnValue({
@@ -113,6 +114,13 @@ jest.mock('../../../api', () => ({
buildApiHandler: jest.fn()
}))
+
+// Mock system prompt
+jest.mock('../../prompts/system', () => ({
+SYSTEM_PROMPT: jest.fn().mockImplementation(async () => 'mocked system prompt'),
+codeMode: 'code',
+addCustomInstructions: jest.fn().mockImplementation(async () => '')
+}))
+
// Mock WorkspaceTracker
jest.mock('../../../integrations/workspace/WorkspaceTracker', () => {
return jest.fn().mockImplementation(() => ({
@@ -122,19 +130,25 @@ jest.mock('../../../integrations/workspace/WorkspaceTracker', () => {
})

// Mock Cline
-jest.mock('../../Cline', () => {
-return {
-Cline: jest.fn().mockImplementation(() => ({
-abortTask: jest.fn(),
-handleWebviewAskResponse: jest.fn(),
-clineMessages: [],
-apiConversationHistory: [],
-overwriteClineMessages: jest.fn(),
-overwriteApiConversationHistory: jest.fn(),
-taskId: 'test-task-id'
-}))
-}
-})
+jest.mock('../../Cline', () => ({
+Cline: jest.fn().mockImplementation((
+provider,
+apiConfiguration,
+customInstructions,
+diffEnabled,
+fuzzyMatchThreshold,
+task,
+taskId
+) => ({
+abortTask: jest.fn(),
+handleWebviewAskResponse: jest.fn(),
+clineMessages: [],
+apiConversationHistory: [],
+overwriteClineMessages: jest.fn(),
+overwriteApiConversationHistory: jest.fn(),
+taskId: taskId || 'test-task-id'
+}))
+}))

// Mock extract-text
jest.mock('../../../integrations/misc/extract-text', () => ({
@@ -504,6 +518,182 @@ describe('ClineProvider', () => {
expect(mockPostMessage).toHaveBeenCalled()
})
+
+test('handles updatePrompt message correctly', async () => {
+provider.resolveWebviewView(mockWebviewView)
+const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls[0][0]
+
+// Mock existing prompts
+const existingPrompts = {
+code: 'existing code prompt',
+architect: 'existing architect prompt'
+}
+;(mockContext.globalState.get as jest.Mock).mockImplementation((key: string) => {
+if (key === 'customPrompts') {
+return existingPrompts
+}
+return undefined
+})
+
+// Test updating a prompt
+await messageHandler({
+type: 'updatePrompt',
+promptMode: 'code',
+customPrompt: 'new code prompt'
+})
+
+// Verify state was updated correctly
+expect(mockContext.globalState.update).toHaveBeenCalledWith(
+'customPrompts',
+{
+...existingPrompts,
+code: 'new code prompt'
+}
+)
+
+// Verify state was posted to webview
+expect(mockPostMessage).toHaveBeenCalledWith(
+expect.objectContaining({
+type: 'state',
+state: expect.objectContaining({
+customPrompts: {
+...existingPrompts,
+code: 'new code prompt'
+}
+})
+})
+)
+})
+
+test('customPrompts defaults to empty object', async () => {
+// Mock globalState.get to return undefined for customPrompts
+(mockContext.globalState.get as jest.Mock).mockImplementation((key: string) => {
+if (key === 'customPrompts') {
+return undefined
+}
+return null
+})
+
+const state = await provider.getState()
+expect(state.customPrompts).toEqual({})
+})
+
+test('uses mode-specific custom instructions in Cline initialization', async () => {
+// Setup mock state
+const modeCustomInstructions = 'Code mode instructions';
+const mockApiConfig = {
+apiProvider: 'openrouter',
+openRouterModelInfo: { supportsComputerUse: true }
+};
+
+jest.spyOn(provider, 'getState').mockResolvedValue({
+apiConfiguration: mockApiConfig,
+customPrompts: {
+code: { customInstructions: modeCustomInstructions }
+},
+mode: 'code',
+diffEnabled: true,
+fuzzyMatchThreshold: 1.0
+} as any);
+
+// Reset Cline mock
+const { Cline } = require('../../Cline');
+(Cline as jest.Mock).mockClear();
+
+// Initialize Cline with a task
+await provider.initClineWithTask('Test task');
+
+// Verify Cline was initialized with mode-specific instructions
+expect(Cline).toHaveBeenCalledWith(
+provider,
+mockApiConfig,
+modeCustomInstructions,
+true,
+1.0,
+'Test task',
+undefined
+);
+});
+test('handles mode-specific custom instructions updates', async () => {
+provider.resolveWebviewView(mockWebviewView)
+const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls[0][0]
+
+// Mock existing prompts
+const existingPrompts = {
+code: {
+roleDefinition: 'Code role',
+customInstructions: 'Old instructions'
+}
+}
+mockContext.globalState.get = jest.fn((key: string) => {
+if (key === 'customPrompts') {
+return existingPrompts
+}
+return undefined
+})
+
+// Update custom instructions for code mode
+await messageHandler({
+type: 'updatePrompt',
+promptMode: 'code',
+customPrompt: {
+roleDefinition: 'Code role',
+customInstructions: 'New instructions'
+}
+})
+
+// Verify state was updated correctly
+expect(mockContext.globalState.update).toHaveBeenCalledWith(
+'customPrompts',
+{
+code: {
+roleDefinition: 'Code role',
+customInstructions: 'New instructions'
+}
+}
+)
+})
+
+test('saves mode config when updating API configuration', async () => {
+// Setup mock context with mode and config name
+mockContext = {
+...mockContext,
+globalState: {
+...mockContext.globalState,
+get: jest.fn((key: string) => {
+if (key === 'mode') {
+return 'code'
+} else if (key === 'currentApiConfigName') {
+return 'test-config'
+}
+return undefined
+}),
+update: jest.fn(),
+keys: jest.fn().mockReturnValue([]),
+}
+} as unknown as vscode.ExtensionContext
+
+// Create new provider with updated mock context
+provider = new ClineProvider(mockContext, mockOutputChannel)
+provider.resolveWebviewView(mockWebviewView)
+const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls[0][0]
+
+provider.configManager = {
+ListConfig: jest.fn().mockResolvedValue([
+{ name: 'test-config', id: 'test-id', apiProvider: 'anthropic' }
+]),
+SetModeConfig: jest.fn()
+} as any
+
+// Update API configuration
+await messageHandler({
+type: 'apiConfiguration',
+apiConfiguration: { apiProvider: 'anthropic' }
+})
+
+// Should save config as default for current mode
+expect(provider.configManager.SetModeConfig).toHaveBeenCalledWith('code', 'test-id')
+})
+
test('file content includes line numbers', async () => {
const { extractTextFromFile } = require('../../../integrations/misc/extract-text')
const result = await extractTextFromFile('test.js')
@@ -654,4 +844,165 @@ describe('ClineProvider', () => {
expect(mockCline.overwriteApiConversationHistory).not.toHaveBeenCalled()
})
})
+
+describe('getSystemPrompt', () => {
+beforeEach(() => {
+mockPostMessage.mockClear();
+provider.resolveWebviewView(mockWebviewView);
+});
+
+const getMessageHandler = () => {
+const mockCalls = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls;
+expect(mockCalls.length).toBeGreaterThan(0);
+return mockCalls[0][0];
+};
+
+test('handles mcpEnabled setting correctly', async () => {
+// Mock getState to return mcpEnabled: true
+jest.spyOn(provider, 'getState').mockResolvedValue({
+apiConfiguration: {
+apiProvider: 'openrouter' as const,
+openRouterModelInfo: {
+supportsComputerUse: true,
+supportsPromptCache: false,
+maxTokens: 4096,
+contextWindow: 8192,
+supportsImages: false,
+inputPrice: 0.0,
+outputPrice: 0.0,
+description: undefined
+}
+},
+mcpEnabled: true,
+mode: 'code' as const
+} as any);
+
+const handler1 = getMessageHandler();
+expect(typeof handler1).toBe('function');
+await handler1({ type: 'getSystemPrompt', mode: 'code' });
+
+// Verify mcpHub is passed when mcpEnabled is true
+expect(mockPostMessage).toHaveBeenCalledWith(
+expect.objectContaining({
+type: 'systemPrompt',
+text: expect.any(String)
+})
+);
+
+// Mock getState to return mcpEnabled: false
+jest.spyOn(provider, 'getState').mockResolvedValue({
+apiConfiguration: {
+apiProvider: 'openrouter' as const,
+openRouterModelInfo: {
+supportsComputerUse: true,
+supportsPromptCache: false,
+maxTokens: 4096,
+contextWindow: 8192,
+supportsImages: false,
+inputPrice: 0.0,
+outputPrice: 0.0,
+description: undefined
+}
+},
+mcpEnabled: false,
+mode: 'code' as const
+} as any);
+
+const handler2 = getMessageHandler();
+await handler2({ type: 'getSystemPrompt', mode: 'code' });
+
+// Verify mcpHub is not passed when mcpEnabled is false
+expect(mockPostMessage).toHaveBeenCalledWith(
+expect.objectContaining({
+type: 'systemPrompt',
+text: expect.any(String)
+})
+);
+});
+
+test('handles errors gracefully', async () => {
+// Mock SYSTEM_PROMPT to throw an error
+const systemPrompt = require('../../prompts/system')
+jest.spyOn(systemPrompt, 'SYSTEM_PROMPT').mockRejectedValueOnce(new Error('Test error'))
+
+const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls[0][0]
+await messageHandler({ type: 'getSystemPrompt', mode: 'code' })
+
+expect(vscode.window.showErrorMessage).toHaveBeenCalledWith('Failed to get system prompt')
+})
+
+test('uses mode-specific custom instructions in system prompt', async () => {
+const systemPrompt = require('../../prompts/system')
+const { addCustomInstructions } = systemPrompt
+
+// Mock getState to return mode-specific custom instructions
+jest.spyOn(provider, 'getState').mockResolvedValue({
+apiConfiguration: {
+apiProvider: 'openrouter',
+openRouterModelInfo: { supportsComputerUse: true }
+},
+customPrompts: {
+code: { customInstructions: 'Code mode specific instructions' }
+},
+mode: 'code',
+mcpEnabled: false,
+browserViewportSize: '900x600'
+} as any)
+
+const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls[0][0]
+await messageHandler({ type: 'getSystemPrompt', mode: 'code' })
+
+// Verify addCustomInstructions was called with mode-specific instructions
+expect(addCustomInstructions).toHaveBeenCalledWith(
+{
+customInstructions: undefined,
+customPrompts: {
+code: { customInstructions: 'Code mode specific instructions' }
+},
+preferredLanguage: undefined
+},
+expect.any(String),
+'code'
+)
+})
+
+test('uses correct mode-specific instructions when mode is specified', async () => {
+const systemPrompt = require('../../prompts/system')
+const { addCustomInstructions } = systemPrompt
+
+// Mock getState to return instructions for multiple modes
+jest.spyOn(provider, 'getState').mockResolvedValue({
+apiConfiguration: {
+apiProvider: 'openrouter',
+openRouterModelInfo: { supportsComputerUse: true }
+},
+customPrompts: {
+code: { customInstructions: 'Code mode instructions' },
+architect: { customInstructions: 'Architect mode instructions' }
+},
+mode: 'code',
+mcpEnabled: false,
+browserViewportSize: '900x600'
+} as any)
+
+const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls[0][0]
+
+// Request architect mode prompt
+await messageHandler({ type: 'getSystemPrompt', mode: 'architect' })
+
+// Verify architect mode instructions were used
+expect(addCustomInstructions).toHaveBeenCalledWith(
+{
+customInstructions: undefined,
+customPrompts: {
+code: { customInstructions: 'Code mode instructions' },
+architect: { customInstructions: 'Architect mode instructions' }
+},
+preferredLanguage: undefined
+},
+expect.any(String),
+'architect'
+)
+})
+})
})
@@ -59,6 +59,12 @@ export function activate(context: vscode.ExtensionContext) {
}),
)
+
+context.subscriptions.push(
+vscode.commands.registerCommand("roo-cline.promptsButtonClicked", () => {
+sidebarProvider.postMessageToWebview({ type: "action", action: "promptsButtonClicked" })
+}),
+)
+
const openClineInNewTab = async () => {
outputChannel.appendLine("Opening Cline in new tab")
// (this example uses webviewProvider activation event which is necessary to deserialize cached webview, but since we use retainContextWhenHidden, we don't need to use that event)
@@ -20,11 +20,41 @@ export async function openImage(dataUri: string) {
}
}

-export async function openFile(absolutePath: string) {
-try {
-const uri = vscode.Uri.file(absolutePath)
-// Check if the document is already open in a tab group that's not in the active editor's column. If it is, then close it (if not dirty) so that we don't duplicate tabs
+interface OpenFileOptions {
+create?: boolean;
+content?: string;
+}
+
+export async function openFile(filePath: string, options: OpenFileOptions = {}) {
+try {
+// Get workspace root
+const workspaceRoot = vscode.workspace.workspaceFolders?.[0]?.uri.fsPath
+if (!workspaceRoot) {
+throw new Error('No workspace root found')
+}
+
+// If path starts with ./, resolve it relative to workspace root
+const fullPath = filePath.startsWith('./') ?
+path.join(workspaceRoot, filePath.slice(2)) :
+filePath
+
+const uri = vscode.Uri.file(fullPath)
+
+// Check if file exists
+try {
+await vscode.workspace.fs.stat(uri)
+} catch {
+// File doesn't exist
+if (!options.create) {
+throw new Error('File does not exist')
+}
+
+// Create with provided content or empty string
+const content = options.content || ''
+await vscode.workspace.fs.writeFile(uri, Buffer.from(content, 'utf8'))
+}
+
+// Check if the document is already open in a tab group that's not in the active editor's column
try {
for (const group of vscode.window.tabGroups.all) {
const existingTab = group.tabs.find(
@@ -47,6 +77,10 @@ export async function openFile(absolutePath: string) {
const document = await vscode.workspace.openTextDocument(uri)
await vscode.window.showTextDocument(document, { preview: false })
} catch (error) {
-vscode.window.showErrorMessage(`Could not open file!`)
+if (error instanceof Error) {
+vscode.window.showErrorMessage(`Could not open file: ${error.message}`)
+} else {
+vscode.window.showErrorMessage(`Could not open file!`)
+}
}
}
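A hedged usage sketch of the reworked openFile. The paths and the import location are hypothetical, but the options object matches the OpenFileOptions interface introduced above.

```ts
import { openFile } from "./open-file" // assumed import path for the module shown above

async function openProjectFiles() {
	// Open an existing file by absolute path (unchanged behaviour).
	await openFile("/workspace/src/extension.ts")

	// Open a workspace-relative path, creating it with seed content when it does not exist yet.
	await openFile("./docs/NOTES.md", { create: true, content: "# Notes\n" })

	// Without { create: true }, a missing path surfaces "Could not open file: File does not exist".
}
```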
@@ -4,7 +4,7 @@ import { ApiConfiguration, ApiProvider, ModelInfo } from "./api"
import { HistoryItem } from "./HistoryItem"
import { McpServer } from "./mcp"
import { GitCommit } from "../utils/git"
-import { Mode } from "../core/prompts/types"
+import { Mode, CustomPrompts } from "./modes"

// webview will hold state
export interface ExtensionMessage {
@@ -25,12 +25,15 @@ export interface ExtensionMessage {
| "enhancedPrompt"
| "commitSearchResults"
| "listApiConfig"
+| "updatePrompt"
+| "systemPrompt"
text?: string
action?:
| "chatButtonClicked"
| "mcpButtonClicked"
| "settingsButtonClicked"
| "historyButtonClicked"
+| "promptsButtonClicked"
| "didBecomeVisible"
invoke?: "sendMessage" | "primaryButtonClick" | "secondaryButtonClick"
state?: ExtensionState
@@ -45,6 +48,7 @@ export interface ExtensionMessage {
mcpServers?: McpServer[]
commits?: GitCommit[]
listApiConfig?: ApiConfigMeta[]
+mode?: Mode
}

export interface ApiConfigMeta {
@@ -62,6 +66,7 @@ export interface ExtensionState {
currentApiConfigName?: string
listApiConfigMeta?: ApiConfigMeta[]
customInstructions?: string
+customPrompts?: CustomPrompts
alwaysAllowReadOnly?: boolean
alwaysAllowWrite?: boolean
alwaysAllowExecute?: boolean
@@ -82,7 +87,8 @@ export interface ExtensionState {
terminalOutputLineLimit?: number
mcpEnabled: boolean
mode: Mode
-modeApiConfigs?: Record<Mode, string>;
+modeApiConfigs?: Record<Mode, string>
+enhancementApiConfigId?: string
}

export interface ClineMessage {
@@ -1,4 +1,7 @@
import { ApiConfiguration, ApiProvider } from "./api"
+import { Mode, PromptComponent } from "./modes"
+
+export type PromptMode = Mode | 'enhance'
+
export type AudioType = "notification" | "celebration" | "progress_loop"

@@ -62,6 +65,11 @@ export interface WebviewMessage {
| "requestDelaySeconds"
| "setApiConfigPassword"
| "mode"
+| "updatePrompt"
+| "updateEnhancedPrompt"
+| "getSystemPrompt"
+| "systemPrompt"
+| "enhancementApiConfigId"
text?: string
disabled?: boolean
askResponse?: ClineAskResponse
@@ -74,6 +82,9 @@ export interface WebviewMessage {
serverName?: string
toolName?: string
alwaysAllow?: boolean
+mode?: Mode
+promptMode?: PromptMode
+customPrompt?: PromptComponent
dataUrls?: string[]
values?: Record<string, any>
query?: string
@@ -3,3 +3,28 @@ export const architectMode = 'architect' as const;
export const askMode = 'ask' as const;

export type Mode = typeof codeMode | typeof architectMode | typeof askMode;
+
+export type PromptComponent = {
+roleDefinition?: string;
+customInstructions?: string;
+}
+
+export type CustomPrompts = {
+ask?: PromptComponent;
+code?: PromptComponent;
+architect?: PromptComponent;
+enhance?: string;
+}
+
+export const defaultPrompts = {
+[askMode]: {
+roleDefinition: "You are Cline, a knowledgeable technical assistant focused on answering questions and providing information about software development, technology, and related topics. You can analyze code, explain concepts, and access external resources while maintaining a read-only approach to the codebase. Make sure to answer the user's questions and don't rush to switch to implementing code.",
+},
+[codeMode]: {
+roleDefinition: "You are Cline, a highly skilled software engineer with extensive knowledge in many programming languages, frameworks, design patterns, and best practices.",
+},
+[architectMode]: {
+roleDefinition: "You are Cline, a software architecture expert specializing in analyzing codebases, identifying patterns, and providing high-level technical guidance. You excel at understanding complex systems, evaluating architectural decisions, and suggesting improvements while maintaining a read-only approach to the codebase. Make sure to help the user come up with a solid implementation plan for their project and don't rush to switch to implementing code.",
+},
+enhance: "Generate an enhanced version of this prompt (reply with only the enhanced prompt - no conversation, explanations, lead-in, bullet points, placeholders, or surrounding quotes):"
+} as const;
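A hedged sketch of how these types compose. The override values are hypothetical, and the resolver shown is just one plausible way a caller might fall back to defaultPrompts; the import path assumes the shared modes module above.

```ts
import { CustomPrompts, Mode, defaultPrompts } from "./modes"

// Overrides as they would be stored under the "customPrompts" global-state key.
const customPrompts: CustomPrompts = {
	architect: { customInstructions: "Prioritise system scalability in every recommendation." },
	enhance: "Rewrite the prompt so it is unambiguous and measurable:",
}

// Resolve the effective role definition for a mode, falling back to the built-in default.
function roleDefinitionFor(mode: Mode, prompts: CustomPrompts): string {
	return prompts[mode]?.roleDefinition ?? defaultPrompts[mode].roleDefinition
}
```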
@@ -1,80 +1,126 @@
import { enhancePrompt } from '../enhance-prompt'
-import { buildApiHandler } from '../../api'
import { ApiConfiguration } from '../../shared/api'
-import { OpenRouterHandler } from '../../api/providers/openrouter'
+import { buildApiHandler, SingleCompletionHandler } from '../../api'
+import { defaultPrompts } from '../../shared/modes'

-// Mock the buildApiHandler function
+// Mock the API handler
jest.mock('../../api', () => ({
buildApiHandler: jest.fn()
}))

describe('enhancePrompt', () => {
const mockApiConfig: ApiConfiguration = {
-apiProvider: 'openrouter',
-apiKey: 'test-key',
-openRouterApiKey: 'test-key',
-openRouterModelId: 'test-model'
-}
+apiProvider: 'openai',
+openAiApiKey: 'test-key',
+openAiBaseUrl: 'https://api.openai.com/v1'
+}

-// Create a mock handler that looks like OpenRouterHandler
-const mockHandler = {
-completePrompt: jest.fn(),
-createMessage: jest.fn(),
-getModel: jest.fn()
-}
-
-// Make instanceof check work
-Object.setPrototypeOf(mockHandler, OpenRouterHandler.prototype)
-
-beforeEach(() => {
-jest.clearAllMocks()
-;(buildApiHandler as jest.Mock).mockReturnValue(mockHandler)
-})
-
-it('should throw error for non-OpenRouter providers', async () => {
-const nonOpenRouterConfig: ApiConfiguration = {
-apiProvider: 'anthropic',
-apiKey: 'test-key',
-apiModelId: 'claude-3'
-}
-await expect(enhancePrompt(nonOpenRouterConfig, 'test')).rejects.toThrow('Prompt enhancement is only available with OpenRouter')
-})
+beforeEach(() => {
+jest.clearAllMocks()
+
+// Mock the API handler with a completePrompt method
+;(buildApiHandler as jest.Mock).mockReturnValue({
+completePrompt: jest.fn().mockResolvedValue('Enhanced prompt'),
+createMessage: jest.fn(),
+getModel: jest.fn().mockReturnValue({
+id: 'test-model',
+info: {
+maxTokens: 4096,
+contextWindow: 8192,
+supportsPromptCache: false
+}
+})
+} as unknown as SingleCompletionHandler)
+})
+
+it('enhances prompt using default enhancement prompt when no custom prompt provided', async () => {
+const result = await enhancePrompt(mockApiConfig, 'Test prompt')
+
+expect(result).toBe('Enhanced prompt')
+const handler = buildApiHandler(mockApiConfig)
+expect((handler as any).completePrompt).toHaveBeenCalledWith(
+`${defaultPrompts.enhance}\n\nTest prompt`
+)
+})
+
+it('enhances prompt using custom enhancement prompt when provided', async () => {
+const customEnhancePrompt = 'You are a custom prompt enhancer'
+
+const result = await enhancePrompt(mockApiConfig, 'Test prompt', customEnhancePrompt)
+
+expect(result).toBe('Enhanced prompt')
+const handler = buildApiHandler(mockApiConfig)
+expect((handler as any).completePrompt).toHaveBeenCalledWith(
+`${customEnhancePrompt}\n\nTest prompt`
+)
+})
+
+it('throws error for empty prompt input', async () => {
+await expect(enhancePrompt(mockApiConfig, '')).rejects.toThrow('No prompt text provided')
+})
+
+it('throws error for missing API configuration', async () => {
+await expect(enhancePrompt({} as ApiConfiguration, 'Test prompt')).rejects.toThrow('No valid API configuration provided')
+})
+
+it('throws error for API provider that does not support prompt enhancement', async () => {
+(buildApiHandler as jest.Mock).mockReturnValue({
+// No completePrompt method
+createMessage: jest.fn(),
+getModel: jest.fn().mockReturnValue({
+id: 'test-model',
+info: {
+maxTokens: 4096,
+contextWindow: 8192,
+supportsPromptCache: false
+}
+})
})
-
-it('should enhance a valid prompt', async () => {
-const inputPrompt = 'Write a function to sort an array'
-const enhancedPrompt = 'Write a TypeScript function that implements an efficient sorting algorithm for a generic array, including error handling and type safety'
-
-mockHandler.completePrompt.mockResolvedValue(enhancedPrompt)
-
-const result = await enhancePrompt(mockApiConfig, inputPrompt)
-
-expect(result).toBe(enhancedPrompt)
-expect(buildApiHandler).toHaveBeenCalledWith(mockApiConfig)
-expect(mockHandler.completePrompt).toHaveBeenCalledWith(
-expect.stringContaining(inputPrompt)
-)
-})
-
-it('should throw error when no prompt text is provided', async () => {
-await expect(enhancePrompt(mockApiConfig, '')).rejects.toThrow('No prompt text provided')
-expect(mockHandler.completePrompt).not.toHaveBeenCalled()
-})
+
+await expect(enhancePrompt(mockApiConfig, 'Test prompt')).rejects.toThrow('The selected API provider does not support prompt enhancement')
+})
+
+it('uses appropriate model based on provider', async () => {
+const openRouterConfig: ApiConfiguration = {
+apiProvider: 'openrouter',
+openRouterApiKey: 'test-key',
+openRouterModelId: 'test-model'
+}
+
+// Mock successful enhancement
+;(buildApiHandler as jest.Mock).mockReturnValue({
+completePrompt: jest.fn().mockResolvedValue('Enhanced prompt'),
+createMessage: jest.fn(),
+getModel: jest.fn().mockReturnValue({
+id: 'test-model',
+info: {
+maxTokens: 4096,
+contextWindow: 8192,
+supportsPromptCache: false
+}
+})
+} as unknown as SingleCompletionHandler)
+
+const result = await enhancePrompt(openRouterConfig, 'Test prompt')
+
+expect(buildApiHandler).toHaveBeenCalledWith(openRouterConfig)
+expect(result).toBe('Enhanced prompt')
+})

-it('should pass through API errors', async () => {
-const inputPrompt = 'Test prompt'
-mockHandler.completePrompt.mockRejectedValue('API error')
-
-await expect(enhancePrompt(mockApiConfig, inputPrompt)).rejects.toBe('API error')
-})
-
-it('should pass the correct prompt format to the API', async () => {
-const inputPrompt = 'Test prompt'
-mockHandler.completePrompt.mockResolvedValue('Enhanced test prompt')
-
-await enhancePrompt(mockApiConfig, inputPrompt)
-
-expect(mockHandler.completePrompt).toHaveBeenCalledWith(
-'Generate an enhanced version of this prompt (reply with only the enhanced prompt - no conversation, explanations, lead-in, bullet points, placeholders, or surrounding quotes):\n\nTest prompt'
-)
-})
+it('propagates API errors', async () => {
+(buildApiHandler as jest.Mock).mockReturnValue({
+completePrompt: jest.fn().mockRejectedValue(new Error('API Error')),
+createMessage: jest.fn(),
+getModel: jest.fn().mockReturnValue({
+id: 'test-model',
+info: {
+maxTokens: 4096,
+contextWindow: 8192,
+supportsPromptCache: false
+}
+})
+} as unknown as SingleCompletionHandler)
+
+await expect(enhancePrompt(mockApiConfig, 'Test prompt')).rejects.toThrow('API Error')
+})
})
@@ -1,26 +1,27 @@
import { ApiConfiguration } from "../shared/api"
-import { buildApiHandler } from "../api"
+import { buildApiHandler, SingleCompletionHandler } from "../api"
-import { OpenRouterHandler } from "../api/providers/openrouter"
+import { defaultPrompts } from "../shared/modes"

/**
-* Enhances a prompt using the OpenRouter API without creating a full Cline instance or task history.
+* Enhances a prompt using the configured API without creating a full Cline instance or task history.
* This is a lightweight alternative that only uses the API's completion functionality.
*/
-export async function enhancePrompt(apiConfiguration: ApiConfiguration, promptText: string): Promise<string> {
+export async function enhancePrompt(apiConfiguration: ApiConfiguration, promptText: string, enhancePrompt?: string): Promise<string> {
if (!promptText) {
throw new Error("No prompt text provided")
}
-if (apiConfiguration.apiProvider !== "openrouter") {
-throw new Error("Prompt enhancement is only available with OpenRouter")
+if (!apiConfiguration || !apiConfiguration.apiProvider) {
+throw new Error("No valid API configuration provided")
}

const handler = buildApiHandler(apiConfiguration)

-// Type guard to check if handler is OpenRouterHandler
-if (!(handler instanceof OpenRouterHandler)) {
-throw new Error("Expected OpenRouter handler")
+// Check if handler supports single completions
+if (!('completePrompt' in handler)) {
+throw new Error("The selected API provider does not support prompt enhancement")
}

-const prompt = `Generate an enhanced version of this prompt (reply with only the enhanced prompt - no conversation, explanations, lead-in, bullet points, placeholders, or surrounding quotes):\n\n${promptText}`
-return handler.completePrompt(prompt)
+const enhancePromptText = enhancePrompt ?? defaultPrompts.enhance
+const prompt = `${enhancePromptText}\n\n${promptText}`
+return (handler as SingleCompletionHandler).completePrompt(prompt)
}
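A hedged usage sketch of the provider-agnostic enhancePrompt. The configuration values are placeholders; any provider whose handler exposes completePrompt should work, and the import paths assume the modules shown above.

```ts
import { enhancePrompt } from "./enhance-prompt"
import { ApiConfiguration } from "../shared/api"

async function demo() {
	// Hypothetical configuration; the field names match the test fixture earlier in this diff.
	const config: ApiConfiguration = {
		apiProvider: "openai",
		openAiApiKey: "<your-key>",
		openAiBaseUrl: "https://api.openai.com/v1",
	}

	// Falls back to defaultPrompts.enhance when no custom enhancement prompt is passed.
	const improved = await enhancePrompt(config, "write tests for the parser")

	// Or pass the user's customPrompts.enhance text explicitly.
	const improvedCustom = await enhancePrompt(config, "write tests for the parser", "Make this prompt concrete and measurable:")

	return { improved, improvedCustom }
}
```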
22
webview-ui/config-overrides.js
Normal file
@@ -0,0 +1,22 @@
+const { override } = require('customize-cra');
+
+module.exports = override();
+
+// Jest configuration override
+module.exports.jest = function(config) {
+// Configure reporters
+config.reporters = [["jest-simple-dot-reporter", {}]];
+
+// Configure module name mapper for CSS modules
+config.moduleNameMapper = {
+...config.moduleNameMapper,
+"\\.(css|less|scss|sass)$": "identity-obj-proxy"
+};
+
+// Configure transform ignore patterns for ES modules
+config.transformIgnorePatterns = [
+'/node_modules/(?!(rehype-highlight|react-remark|unist-util-visit|unist-util-find-after|vfile|unified|bail|is-plain-obj|trough|vfile-message|unist-util-stringify-position|mdast-util-from-markdown|mdast-util-to-string|micromark|decode-named-character-reference|character-entities|markdown-table|zwitch|longest-streak|escape-string-regexp|unist-util-is|hast-util-to-text|@vscode/webview-ui-toolkit|@microsoft/fast-react-wrapper|@microsoft/fast-element|@microsoft/fast-foundation|@microsoft/fast-web-utilities|exenv-es6)/)'
+];
+
+return config;
+}
50
webview-ui/package-lock.json
generated
@@ -37,7 +37,10 @@
"@babel/plugin-proposal-private-property-in-object": "^7.21.11",
"@types/shell-quote": "^1.7.5",
"@types/vscode-webview": "^1.57.5",
-"eslint": "^8.57.0"
+"customize-cra": "^1.0.0",
+"eslint": "^8.57.0",
+"jest-simple-dot-reporter": "^1.0.5",
+"react-app-rewired": "^2.2.1"
}
},
"node_modules/@adobe/css-tools": {
@@ -5624,6 +5627,15 @@
"version": "3.1.3",
"license": "MIT"
},
+"node_modules/customize-cra": {
+"version": "1.0.0",
+"resolved": "https://registry.npmjs.org/customize-cra/-/customize-cra-1.0.0.tgz",
+"integrity": "sha512-DbtaLuy59224U+xCiukkxSq8clq++MOtJ1Et7LED1fLszWe88EoblEYFBJ895sB1mC6B4uu3xPT/IjClELhMbA==",
+"dev": true,
+"dependencies": {
+"lodash.flow": "^3.5.0"
+}
+},
"node_modules/damerau-levenshtein": {
"version": "1.0.8",
"license": "BSD-2-Clause"
@@ -9257,6 +9269,12 @@
"node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0"
}
},
+"node_modules/jest-simple-dot-reporter": {
+"version": "1.0.5",
+"resolved": "https://registry.npmjs.org/jest-simple-dot-reporter/-/jest-simple-dot-reporter-1.0.5.tgz",
+"integrity": "sha512-cZLFG/C7k0+WYoIGGuGXKm0vmJiXlWG/m3uCZ4RaMPYxt8lxjdXMLHYkxXaQ7gVWaSPe7uAPCEUcRxthC5xskg==",
+"dev": true
+},
"node_modules/jest-snapshot": {
"version": "27.5.1",
"license": "MIT",
@@ -9896,6 +9914,12 @@
"version": "4.0.8",
"license": "MIT"
},
+"node_modules/lodash.flow": {
+"version": "3.5.0",
+"resolved": "https://registry.npmjs.org/lodash.flow/-/lodash.flow-3.5.0.tgz",
+"integrity": "sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==",
+"dev": true
+},
"node_modules/lodash.memoize": {
"version": "4.1.2",
"license": "MIT"
@@ -12269,6 +12293,30 @@
"version": "0.13.11",
"license": "MIT"
},
+"node_modules/react-app-rewired": {
+"version": "2.2.1",
+"resolved": "https://registry.npmjs.org/react-app-rewired/-/react-app-rewired-2.2.1.tgz",
+"integrity": "sha512-uFQWTErXeLDrMzOJHKp0h8P1z0LV9HzPGsJ6adOtGlA/B9WfT6Shh4j2tLTTGlXOfiVx6w6iWpp7SOC5pvk+gA==",
+"dev": true,
+"dependencies": {
+"semver": "^5.6.0"
+},
+"bin": {
+"react-app-rewired": "bin/index.js"
+},
+"peerDependencies": {
+"react-scripts": ">=2.1.3"
+}
+},
+"node_modules/react-app-rewired/node_modules/semver": {
+"version": "5.7.2",
+"resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
+"integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==",
+"dev": true,
+"bin": {
+"semver": "bin/semver"
+}
+},
"node_modules/react-dev-utils": {
"version": "12.0.1",
"license": "MIT",
@@ -29,9 +29,9 @@
|
|||||||
"web-vitals": "^2.1.4"
|
"web-vitals": "^2.1.4"
|
||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"start": "react-scripts start",
|
"start": "react-app-rewired start",
|
||||||
"build": "node ./scripts/build-react-no-split.js",
|
"build": "node ./scripts/build-react-no-split.js",
|
||||||
"test": "react-scripts test --watchAll=false",
|
"test": "react-app-rewired test --watchAll=false",
|
||||||
"eject": "react-scripts eject",
|
"eject": "react-scripts eject",
|
||||||
"lint": "eslint src --ext ts,tsx"
|
"lint": "eslint src --ext ts,tsx"
|
||||||
},
|
},
|
||||||
@@ -57,14 +57,9 @@
|
|||||||
"@babel/plugin-proposal-private-property-in-object": "^7.21.11",
|
"@babel/plugin-proposal-private-property-in-object": "^7.21.11",
|
||||||
"@types/shell-quote": "^1.7.5",
|
"@types/shell-quote": "^1.7.5",
|
||||||
"@types/vscode-webview": "^1.57.5",
|
"@types/vscode-webview": "^1.57.5",
|
||||||
"eslint": "^8.57.0"
|
"customize-cra": "^1.0.0",
|
||||||
},
|
"eslint": "^8.57.0",
|
||||||
"jest": {
|
"jest-simple-dot-reporter": "^1.0.5",
|
||||||
"transformIgnorePatterns": [
|
"react-app-rewired": "^2.2.1"
|
||||||
"/node_modules/(?!(rehype-highlight|react-remark|unist-util-visit|unist-util-find-after|vfile|unified|bail|is-plain-obj|trough|vfile-message|unist-util-stringify-position|mdast-util-from-markdown|mdast-util-to-string|micromark|decode-named-character-reference|character-entities|markdown-table|zwitch|longest-streak|escape-string-regexp|unist-util-is|hast-util-to-text|@vscode/webview-ui-toolkit|@microsoft/fast-react-wrapper|@microsoft/fast-element|@microsoft/fast-foundation|@microsoft/fast-web-utilities|exenv-es6)/)"
|
|
||||||
],
|
|
||||||
"moduleNameMapper": {
|
|
||||||
"\\.(css|less|scss|sass)$": "identity-obj-proxy"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,12 +8,14 @@ import WelcomeView from "./components/welcome/WelcomeView"
|
|||||||
import { ExtensionStateContextProvider, useExtensionState } from "./context/ExtensionStateContext"
|
import { ExtensionStateContextProvider, useExtensionState } from "./context/ExtensionStateContext"
|
||||||
import { vscode } from "./utils/vscode"
|
import { vscode } from "./utils/vscode"
|
||||||
import McpView from "./components/mcp/McpView"
|
import McpView from "./components/mcp/McpView"
|
||||||
|
+import PromptsView from "./components/prompts/PromptsView"

 const AppContent = () => {
 	const { didHydrateState, showWelcome, shouldShowAnnouncement } = useExtensionState()
 	const [showSettings, setShowSettings] = useState(false)
 	const [showHistory, setShowHistory] = useState(false)
 	const [showMcp, setShowMcp] = useState(false)
+	const [showPrompts, setShowPrompts] = useState(false)
 	const [showAnnouncement, setShowAnnouncement] = useState(false)

 	const handleMessage = useCallback((e: MessageEvent) => {
@@ -25,21 +27,31 @@ const AppContent = () => {
 					setShowSettings(true)
 					setShowHistory(false)
 					setShowMcp(false)
+					setShowPrompts(false)
 					break
 				case "historyButtonClicked":
 					setShowSettings(false)
 					setShowHistory(true)
 					setShowMcp(false)
+					setShowPrompts(false)
 					break
 				case "mcpButtonClicked":
 					setShowSettings(false)
 					setShowHistory(false)
 					setShowMcp(true)
+					setShowPrompts(false)
+					break
+				case "promptsButtonClicked":
+					setShowSettings(false)
+					setShowHistory(false)
+					setShowMcp(false)
+					setShowPrompts(true)
 					break
 				case "chatButtonClicked":
 					setShowSettings(false)
 					setShowHistory(false)
 					setShowMcp(false)
+					setShowPrompts(false)
 					break
 			}
 			break
@@ -68,14 +80,16 @@ const AppContent = () => {
 			{showSettings && <SettingsView onDone={() => setShowSettings(false)} />}
 			{showHistory && <HistoryView onDone={() => setShowHistory(false)} />}
 			{showMcp && <McpView onDone={() => setShowMcp(false)} />}
+			{showPrompts && <PromptsView onDone={() => setShowPrompts(false)} />}
 			{/* Do not conditionally load ChatView, it's expensive and there's state we don't want to lose (user input, disableInput, askResponse promise, etc.) */}
 			<ChatView
 				showHistoryView={() => {
 					setShowSettings(false)
 					setShowMcp(false)
+					setShowPrompts(false)
 					setShowHistory(true)
 				}}
-				isHidden={showSettings || showHistory || showMcp}
+				isHidden={showSettings || showHistory || showMcp || showPrompts}
 				showAnnouncement={showAnnouncement}
 				hideAnnouncement={() => {
 					setShowAnnouncement(false)
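Taken together, these changes keep the Settings, History, MCP, and new Prompts panels mutually exclusive: the panel named by the incoming action becomes the only one shown, and `chatButtonClicked` hides them all. The sketch below restates that invariant in plain TypeScript; `PanelAction` and `visibilityFor` are illustrative names, not part of the diff.

```typescript
// Illustrative only: these names do not exist in the codebase; the logic
// mirrors the switch statement in the diff above.
type PanelAction =
	| "settingsButtonClicked"
	| "historyButtonClicked"
	| "mcpButtonClicked"
	| "promptsButtonClicked"
	| "chatButtonClicked"

interface PanelVisibility {
	showSettings: boolean
	showHistory: boolean
	showMcp: boolean
	showPrompts: boolean
}

// For any action, at most one secondary panel is visible afterwards;
// "chatButtonClicked" returns to the chat view with none of them shown.
function visibilityFor(action: PanelAction): PanelVisibility {
	return {
		showSettings: action === "settingsButtonClicked",
		showHistory: action === "historyButtonClicked",
		showMcp: action === "mcpButtonClicked",
		showPrompts: action === "promptsButtonClicked",
	}
}
```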
@@ -29,100 +29,39 @@ const Announcement = ({ version, hideAnnouncement }: AnnouncementProps) => {
 				style={{ position: "absolute", top: "8px", right: "8px" }}>
 				<span className="codicon codicon-close"></span>
 			</VSCodeButton>
+			<h2 style={{ margin: "0 0 8px" }}>
+				🎉{" "}Introducing Roo Cline v{minorVersion}
+			</h2>

 			<h3 style={{ margin: "0 0 8px" }}>
-				🎉{" "}New in Cline v{minorVersion}
+				Agent Modes Customization
 			</h3>
-			<p style={{ margin: "5px 0px", fontWeight: "bold" }}>Add custom tools to Cline using MCP!</p>
 			<p style={{ margin: "5px 0px" }}>
-				The Model Context Protocol allows agents like Cline to plug and play custom tools,{" "}
-				<VSCodeLink href="https://github.com/modelcontextprotocol/servers" style={{ display: "inline" }}>
-					e.g. a web-search tool or GitHub tool.
-				</VSCodeLink>
-			</p>
-			<p style={{ margin: "5px 0px" }}>
-				You can add and configure MCP servers by clicking the new{" "}
-				<span className="codicon codicon-server" style={{ fontSize: "10px" }}></span> icon in the menu bar.
-			</p>
-			<p style={{ margin: "5px 0px" }}>
-				To take things a step further, Cline also has the ability to create custom tools for himself. Just say
-				"add a tool that..." and watch as he builds and installs new capabilities specific to{" "}
-				<i>your workflow</i>. For example:
+				Click the new <span className="codicon codicon-notebook" style={{ fontSize: "10px" }}></span> icon in the menu bar to open the Prompts Settings and customize Agent Modes for new levels of productivity.
 				<ul style={{ margin: "4px 0 6px 20px", padding: 0 }}>
-					<li>"...fetches Jira tickets": Get ticket ACs and put Cline to work</li>
-					<li>"...manages AWS EC2s": Check server metrics and scale up or down</li>
-					<li>"...pulls PagerDuty incidents": Pulls details to help Cline fix bugs</li>
+					<li>Tailor how Roo Cline behaves in different modes: Code, Architect, and Ask.</li>
+					<li>Preview and verify your changes using the Preview System Prompt button.</li>
 				</ul>
-				Cline handles everything from creating the MCP server to installing it in the extension, ready to use in
-				future tasks. The servers are saved to <code>~/Documents/Cline/MCP</code> so you can easily share them
-				with others too.{" "}
 			</p>

+			<h3 style={{ margin: "0 0 8px" }}>
+				Prompt Enhancement Configuration
+			</h3>
 			<p style={{ margin: "5px 0px" }}>
-				Try it yourself by asking Cline to "add a tool that gets the latest npm docs", or
-				<VSCodeLink href="https://x.com/sdrzn/status/1867271665086074969" style={{ display: "inline" }}>
-					see a demo of MCP in action here.
-				</VSCodeLink>
+				Now available for all providers! Access it directly in the chat box by clicking the <span className="codicon codicon-sparkle" style={{ fontSize: "10px" }}></span> sparkle icon next to the input field. From there, you can customize the enhancement logic and provider to best suit your workflow.
+				<ul style={{ margin: "4px 0 6px 20px", padding: 0 }}>
+					<li>Customize how prompts are enhanced for better results in your workflow.</li>
+					<li>Use the sparkle icon in the chat box to select an API configuration and provider (e.g., GPT-4) and configure your own enhancement logic.</li>
+					<li>Test your changes instantly with the Preview Prompt Enhancement tool.</li>
+				</ul>
 			</p>
-			{/*<ul style={{ margin: "0 0 8px", paddingLeft: "12px" }}>
-				<li>
-					OpenRouter now supports prompt caching! They also have much higher rate limits than other providers,
-					so I recommend trying them out.
-					<br />
-					{!apiConfiguration?.openRouterApiKey && (
-						<VSCodeButtonLink
-							href={getOpenRouterAuthUrl(vscodeUriScheme)}
-							style={{
-								transform: "scale(0.85)",
-								transformOrigin: "left center",
-								margin: "4px -30px 2px 0",
-							}}>
-							Get OpenRouter API Key
-						</VSCodeButtonLink>
-					)}
-					{apiConfiguration?.openRouterApiKey && apiConfiguration?.apiProvider !== "openrouter" && (
-						<VSCodeButton
-							onClick={() => {
-								vscode.postMessage({
-									type: "apiConfiguration",
-									apiConfiguration: { ...apiConfiguration, apiProvider: "openrouter" },
-								})
-							}}
-							style={{
-								transform: "scale(0.85)",
-								transformOrigin: "left center",
-								margin: "4px -30px 2px 0",
-							}}>
-							Switch to OpenRouter
-						</VSCodeButton>
-					)}
-				</li>
-				<li>
-					<b>Edit Cline's changes before accepting!</b> When he creates or edits a file, you can modify his
-					changes directly in the right side of the diff view (+ hover over the 'Revert Block' arrow button in
-					the center to undo "<code>{"// rest of code here"}</code>" shenanigans)
-				</li>
-				<li>
-					New <code>search_files</code> tool that lets Cline perform regex searches in your project, letting
-					him refactor code, address TODOs and FIXMEs, remove dead code, and more!
-				</li>
-				<li>
-					When Cline runs commands, you can now type directly in the terminal (+ support for Python
-					environments)
-				</li>
-			</ul>*/}
-			<div
-				style={{
-					height: "1px",
-					background: "var(--vscode-foreground)",
-					opacity: 0.1,
-					margin: "8px 0",
-				}}
-			/>
-			<p style={{ margin: "0" }}>
-				Join
-				<VSCodeLink style={{ display: "inline" }} href="https://discord.gg/cline">
-					discord.gg/cline
+			<p style={{ margin: "5px 0px" }}>
+				We're very excited to see what you build with this new feature! Join us at
+				<VSCodeLink href="https://www.reddit.com/r/roocline" style={{ display: "inline" }}>
+					reddit.com/r/roocline
 				</VSCodeLink>
-				for more updates!
+				to discuss and share feedback.
 			</p>
 		</div>
 	)
@@ -49,7 +49,7 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
 		},
 		ref,
 	) => {
-		const { filePaths, apiConfiguration, currentApiConfigName, listApiConfigMeta } = useExtensionState()
+		const { filePaths, currentApiConfigName, listApiConfigMeta } = useExtensionState()
 		const [isTextAreaFocused, setIsTextAreaFocused] = useState(false)
 		const [gitCommits, setGitCommits] = useState<any[]>([])
 		const [showDropdown, setShowDropdown] = useState(false)
@@ -69,8 +69,10 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
 		useEffect(() => {
 			const messageHandler = (event: MessageEvent) => {
 				const message = event.data
-				if (message.type === 'enhancedPrompt' && message.text) {
-					setInputValue(message.text)
+				if (message.type === 'enhancedPrompt') {
+					if (message.text) {
+						setInputValue(message.text)
+					}
 					setIsEnhancingPrompt(false)
 				} else if (message.type === 'commitSearchResults') {
 					const commits = message.commits.map((commit: any) => ({
@@ -652,6 +654,7 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
 							borderBottom: `${thumbnailsHeight + 6}px solid transparent`,
 							borderColor: "transparent",
 							padding: "9px 9px 25px 9px",
+							marginBottom: "15px",
 							cursor: textAreaDisabled ? "not-allowed" : undefined,
 							flex: 1,
 							zIndex: 1,
@@ -766,19 +769,25 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
 				</div>
 				<div className="button-row" style={{ position: "absolute", right: 16, display: "flex", alignItems: "center", height: 31, bottom: 11, zIndex: 3, padding: "0 8px", justifyContent: "flex-end", backgroundColor: "var(--vscode-input-background)", }}>
 					<span style={{ display: "flex", alignItems: "center", gap: 12 }}>
-						{apiConfiguration?.apiProvider === "openrouter" && (
-							<div style={{ display: "flex", alignItems: "center" }}>
-								{isEnhancingPrompt && <span style={{ marginRight: 10, color: "var(--vscode-input-foreground)", opacity: 0.5 }}>Enhancing prompt...</span>}
-								<span
-									role="button"
-									aria-label="enhance prompt"
-									data-testid="enhance-prompt-button"
-									className={`input-icon-button ${textAreaDisabled ? "disabled" : ""} codicon codicon-sparkle`}
-									onClick={() => !textAreaDisabled && handleEnhancePrompt()}
-									style={{ fontSize: 16.5 }}
-								/>
-							</div>
-						)}
+						<div style={{ display: "flex", alignItems: "center" }}>
+							{isEnhancingPrompt ? (
+								<span className="codicon codicon-loading codicon-modifier-spin" style={{
+									color: "var(--vscode-input-foreground)",
+									opacity: 0.5,
+									fontSize: 16.5,
+									marginRight: 10
+								}}></span>
+							) : (
+								<span
+									role="button"
+									aria-label="enhance prompt"
+									data-testid="enhance-prompt-button"
+									className={`input-icon-button ${textAreaDisabled ? "disabled" : ""} codicon codicon-sparkle`}
+									onClick={() => !textAreaDisabled && handleEnhancePrompt()}
+									style={{ fontSize: 16.5 }}
+								/>
+							)}
+						</div>
 						<span className={`input-icon-button ${shouldDisableImages ? "disabled" : ""} codicon codicon-device-camera`} onClick={() => !shouldDisableImages && onSelectImages()} style={{ fontSize: 16.5 }} />
 						<span className={`input-icon-button ${textAreaDisabled ? "disabled" : ""} codicon codicon-send`} onClick={() => !textAreaDisabled && onSend()} style={{ fontSize: 15 }} />
 					</span>
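This ChatTextArea change decouples prompt enhancement from OpenRouter: the ✨ button is now always rendered, a spinner replaces it while a request is in flight, and the response handler only overwrites the input when the extension actually returned text. Below is a minimal sketch of the round trip the handler expects; the "enhancedPrompt" message shape comes from the diff, while the helper and type names are invented for illustration.

```typescript
// Hypothetical helper names; only the "enhancedPrompt" message shape is
// taken from the handler in the diff above.
interface EnhancedPromptMessage {
	type: "enhancedPrompt"
	text?: string
}

function applyEnhancedPrompt(
	message: EnhancedPromptMessage,
	setInputValue: (text: string) => void,
	setIsEnhancingPrompt: (busy: boolean) => void,
): void {
	// Always clear the in-flight spinner, but only replace the user's input
	// when the extension actually produced an enhanced prompt.
	if (message.text) {
		setInputValue(message.text)
	}
	setIsEnhancingPrompt(false)
}
```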
@@ -3,6 +3,7 @@ import '@testing-library/jest-dom';
 import ChatTextArea from '../ChatTextArea';
 import { useExtensionState } from '../../../context/ExtensionStateContext';
 import { vscode } from '../../../utils/vscode';
+import { codeMode } from '../../../../../src/shared/modes';

 // Mock modules
 jest.mock('../../../utils/vscode', () => ({
@@ -32,6 +33,8 @@ describe('ChatTextArea', () => {
 		selectedImages: [],
 		setSelectedImages: jest.fn(),
 		onHeightChange: jest.fn(),
+		mode: codeMode,
+		setMode: jest.fn(),
 	};

 	beforeEach(() => {
@@ -46,37 +49,9 @@ describe('ChatTextArea', () => {
 	});

 	describe('enhance prompt button', () => {
-		it('should show enhance prompt button only when apiProvider is openrouter', () => {
-			// Test with non-openrouter provider
-			(useExtensionState as jest.Mock).mockReturnValue({
-				filePaths: [],
-				apiConfiguration: {
-					apiProvider: 'anthropic',
-				},
-			});
-
-			const { rerender } = render(<ChatTextArea {...defaultProps} />);
-			expect(screen.queryByTestId('enhance-prompt-button')).not.toBeInTheDocument();
-
-			// Test with openrouter provider
-			(useExtensionState as jest.Mock).mockReturnValue({
-				filePaths: [],
-				apiConfiguration: {
-					apiProvider: 'openrouter',
-				},
-			});
-
-			rerender(<ChatTextArea {...defaultProps} />);
-			const enhanceButton = screen.getByRole('button', { name: /enhance prompt/i });
-			expect(enhanceButton).toBeInTheDocument();
-		});
-
 		it('should be disabled when textAreaDisabled is true', () => {
 			(useExtensionState as jest.Mock).mockReturnValue({
 				filePaths: [],
-				apiConfiguration: {
-					apiProvider: 'openrouter',
-				},
 			});

 			render(<ChatTextArea {...defaultProps} textAreaDisabled={true} />);
@@ -137,7 +112,8 @@ describe('ChatTextArea', () => {
 		const enhanceButton = screen.getByRole('button', { name: /enhance prompt/i });
 		fireEvent.click(enhanceButton);

-		expect(screen.getByText('Enhancing prompt...')).toBeInTheDocument();
+		const loadingSpinner = screen.getByText('', { selector: '.codicon-loading' });
+		expect(loadingSpinner).toBeInTheDocument();
 	});
 });
477 webview-ui/src/components/prompts/PromptsView.tsx Normal file
@@ -0,0 +1,477 @@
import { VSCodeButton, VSCodeTextArea, VSCodeDropdown, VSCodeOption } from "@vscode/webview-ui-toolkit/react"
import { useExtensionState } from "../../context/ExtensionStateContext"
import { defaultPrompts, askMode, codeMode, architectMode, Mode, PromptComponent } from "../../../../src/shared/modes"
import { vscode } from "../../utils/vscode"
import React, { useState, useEffect } from "react"

type PromptsViewProps = {
	onDone: () => void
}

const AGENT_MODES = [
	{ id: codeMode, label: 'Code' },
	{ id: architectMode, label: 'Architect' },
	{ id: askMode, label: 'Ask' },
] as const

const PromptsView = ({ onDone }: PromptsViewProps) => {
	const {
		customPrompts,
		listApiConfigMeta,
		enhancementApiConfigId,
		setEnhancementApiConfigId,
		mode,
		customInstructions
	} = useExtensionState()
	const [testPrompt, setTestPrompt] = useState('')
	const [isEnhancing, setIsEnhancing] = useState(false)
	const [activeTab, setActiveTab] = useState<Mode>(mode)
	const [isDialogOpen, setIsDialogOpen] = useState(false)
	const [selectedPromptContent, setSelectedPromptContent] = useState('')
	const [selectedPromptTitle, setSelectedPromptTitle] = useState('')

	useEffect(() => {
		const handler = (event: MessageEvent) => {
			const message = event.data
			if (message.type === 'enhancedPrompt') {
				if (message.text) {
					setTestPrompt(message.text)
				}
				setIsEnhancing(false)
			} else if (message.type === 'systemPrompt') {
				if (message.text) {
					setSelectedPromptContent(message.text)
					setSelectedPromptTitle(`System Prompt (${message.mode} mode)`)
					setIsDialogOpen(true)
				}
			}
		}

		window.addEventListener('message', handler)
		return () => window.removeEventListener('message', handler)
	}, [])

	type AgentMode = typeof codeMode | typeof architectMode | typeof askMode

	const updateAgentPrompt = (mode: AgentMode, promptData: PromptComponent) => {
		const updatedPrompt = {
			...customPrompts?.[mode],
			...promptData
		}

		// Only include properties that differ from defaults
		if (updatedPrompt.roleDefinition === defaultPrompts[mode].roleDefinition) {
			delete updatedPrompt.roleDefinition
		}

		vscode.postMessage({
			type: "updatePrompt",
			promptMode: mode,
			customPrompt: updatedPrompt
		})
	}

	const updateEnhancePrompt = (value: string | undefined) => {
		vscode.postMessage({
			type: "updateEnhancedPrompt",
			text: value
		})
	}

	const handleAgentPromptChange = (mode: AgentMode, e: Event | React.FormEvent<HTMLElement>) => {
		const value = (e as CustomEvent)?.detail?.target?.value || ((e as any).target as HTMLTextAreaElement).value
		updateAgentPrompt(mode, { roleDefinition: value.trim() || undefined })
	}

	const handleEnhancePromptChange = (e: Event | React.FormEvent<HTMLElement>) => {
		const value = (e as CustomEvent)?.detail?.target?.value || ((e as any).target as HTMLTextAreaElement).value
		const trimmedValue = value.trim()
		if (trimmedValue !== defaultPrompts.enhance) {
			updateEnhancePrompt(trimmedValue || undefined)
		}
	}

	const handleAgentReset = (mode: AgentMode) => {
		updateAgentPrompt(mode, {
			...customPrompts?.[mode],
			roleDefinition: undefined
		})
	}

	const handleEnhanceReset = () => {
		updateEnhancePrompt(undefined)
	}

	const getAgentPromptValue = (mode: AgentMode): string => {
		return customPrompts?.[mode]?.roleDefinition ?? defaultPrompts[mode].roleDefinition
	}

	const getEnhancePromptValue = (): string => {
		return customPrompts?.enhance ?? defaultPrompts.enhance
	}

	const handleTestEnhancement = () => {
		if (!testPrompt.trim()) return

		setIsEnhancing(true)
		vscode.postMessage({
			type: "enhancePrompt",
			text: testPrompt
		})
	}

	return (
		<div
			style={{
				position: "fixed",
				top: 0,
				left: 0,
				right: 0,
				bottom: 0,
				display: "flex",
				flexDirection: "column",
			}}>
			<div
				style={{
					display: "flex",
					justifyContent: "space-between",
					alignItems: "center",
					padding: "10px 17px 10px 20px",
				}}>
				<h3 style={{ color: "var(--vscode-foreground)", margin: 0 }}>Prompts</h3>
				<VSCodeButton onClick={onDone}>Done</VSCodeButton>
			</div>

			<div style={{ flex: 1, overflow: "auto", padding: "0 20px" }}>
				<div style={{ marginBottom: '20px' }}>
					<div style={{ fontWeight: "bold", marginBottom: "4px" }}>Custom Instructions for All Modes</div>
					<div style={{ fontSize: "13px", color: "var(--vscode-descriptionForeground)", marginBottom: "8px" }}>
						These instructions apply to all modes. They provide a base set of behaviors that can be enhanced by mode-specific instructions below.
					</div>
					<VSCodeTextArea
						value={customInstructions ?? ''}
						onChange={(e) => {
							const value = (e as CustomEvent)?.detail?.target?.value || ((e as any).target as HTMLTextAreaElement).value
							vscode.postMessage({
								type: "customInstructions",
								text: value.trim() || undefined
							})
						}}
						rows={4}
						resize="vertical"
						style={{ width: "100%" }}
						data-testid="global-custom-instructions-textarea"
					/>
					<div style={{ fontSize: "12px", color: "var(--vscode-descriptionForeground)", marginTop: "5px" }}>
						Instructions can also be loaded from <span
							style={{
								color: 'var(--vscode-textLink-foreground)',
								cursor: 'pointer',
								textDecoration: 'underline'
							}}
							onClick={() => vscode.postMessage({
								type: "openFile",
								text: "./.clinerules",
								values: {
									create: true,
									content: "",
								}
							})}
						>.clinerules</span> in your workspace.
					</div>
				</div>

				<h3 style={{ color: "var(--vscode-foreground)", margin: "0 0 20px 0" }}>Mode-Specific Prompts</h3>

				<div style={{
					display: 'flex',
					gap: '16px',
					alignItems: 'center',
					marginBottom: '12px'
				}}>
					{AGENT_MODES.map((tab) => (
						<button
							key={tab.id}
							data-testid={`${tab.id}-tab`}
							data-active={activeTab === tab.id ? "true" : "false"}
							onClick={() => setActiveTab(tab.id)}
							style={{
								padding: '4px 8px',
								border: 'none',
								background: activeTab === tab.id ? 'var(--vscode-button-background)' : 'none',
								color: activeTab === tab.id ? 'var(--vscode-button-foreground)' : 'var(--vscode-foreground)',
								cursor: 'pointer',
								opacity: activeTab === tab.id ? 1 : 0.8,
								borderRadius: '3px',
								fontWeight: 'bold'
							}}
						>
							{tab.label}
						</button>
					))}
				</div>

				<div style={{ marginBottom: '20px' }}>
					<div style={{ marginBottom: '8px' }}>
						<div>
							<div style={{
								display: 'flex',
								justifyContent: 'space-between',
								alignItems: 'center',
								marginBottom: "4px"
							}}>
								<div style={{ fontWeight: "bold" }}>Role Definition</div>
								<VSCodeButton
									appearance="icon"
									onClick={() => handleAgentReset(activeTab)}
									data-testid="reset-prompt-button"
									title="Revert to default"
								>
									<span className="codicon codicon-discard"></span>
								</VSCodeButton>
							</div>
							<div style={{ fontSize: "13px", color: "var(--vscode-descriptionForeground)", marginBottom: "8px" }}>
								Define Cline's expertise and personality for this mode. This description shapes how Cline presents itself and approaches tasks.
							</div>
						</div>
						<VSCodeTextArea
							value={getAgentPromptValue(activeTab)}
							onChange={(e) => handleAgentPromptChange(activeTab, e)}
							rows={4}
							resize="vertical"
							style={{ width: "100%" }}
							data-testid={`${activeTab}-prompt-textarea`}
						/>
					</div>
					<div style={{ marginBottom: '8px' }}>
						<div style={{ fontWeight: "bold", marginBottom: "4px" }}>Mode-specific Custom Instructions</div>
						<div style={{ fontSize: "13px", color: "var(--vscode-descriptionForeground)", marginBottom: "8px" }}>
							Add behavioral guidelines specific to {activeTab} mode. These instructions enhance the base behaviors defined above.
						</div>
						<VSCodeTextArea
							value={customPrompts?.[activeTab]?.customInstructions ?? ''}
							onChange={(e) => {
								const value = (e as CustomEvent)?.detail?.target?.value || ((e as any).target as HTMLTextAreaElement).value
								updateAgentPrompt(activeTab, {
									...customPrompts?.[activeTab],
									customInstructions: value.trim() || undefined
								})
							}}
							rows={4}
							resize="vertical"
							style={{ width: "100%" }}
							data-testid={`${activeTab}-custom-instructions-textarea`}
						/>
						<div style={{ fontSize: "12px", color: "var(--vscode-descriptionForeground)", marginTop: "5px" }}>
							Custom instructions specific to {activeTab} mode can also be loaded from <span
								style={{
									color: 'var(--vscode-textLink-foreground)',
									cursor: 'pointer',
									textDecoration: 'underline'
								}}
								onClick={() => {
									// First create/update the file with current custom instructions
									const defaultContent = `# ${activeTab} Mode Rules\n\nAdd mode-specific rules and guidelines here.`
									vscode.postMessage({
										type: "updatePrompt",
										promptMode: activeTab,
										customPrompt: {
											...customPrompts?.[activeTab],
											customInstructions: customPrompts?.[activeTab]?.customInstructions || defaultContent
										}
									})
									// Then open the file
									vscode.postMessage({
										type: "openFile",
										text: `./.clinerules-${activeTab}`,
										values: {
											create: true,
											content: "",
										}
									})
								}}
							>.clinerules-{activeTab}</span> in your workspace.
						</div>
					</div>
				</div>
				<div style={{ marginBottom: '20px', display: 'flex', justifyContent: 'flex-start' }}>
					<VSCodeButton
						appearance="primary"
						onClick={() => {
							vscode.postMessage({
								type: "getSystemPrompt",
								mode: activeTab
							})
						}}
						data-testid="preview-prompt-button"
					>
						Preview System Prompt
					</VSCodeButton>
				</div>

				<h3 style={{ color: "var(--vscode-foreground)", margin: "40px 0 20px 0" }}>Prompt Enhancement</h3>

				<div style={{
					color: "var(--vscode-foreground)",
					fontSize: "13px",
					marginBottom: "20px",
					marginTop: "5px",
				}}>
					Use prompt enhancement to get tailored suggestions or improvements for your inputs. This ensures Cline understands your intent and provides the best possible responses.
				</div>

				<div style={{ display: "flex", flexDirection: "column", gap: "20px" }}>
					<div>
						<div style={{ marginBottom: "12px" }}>
							<div style={{ marginBottom: "8px" }}>
								<div style={{ fontWeight: "bold", marginBottom: "4px" }}>API Configuration</div>
								<div style={{ fontSize: "13px", color: "var(--vscode-descriptionForeground)" }}>
									You can select an API configuration to always use for enhancing prompts, or just use whatever is currently selected
								</div>
							</div>
							<VSCodeDropdown
								value={enhancementApiConfigId || ''}
								data-testid="api-config-dropdown"
								onChange={(e: any) => {
									const value = e.detail?.target?.value || e.target?.value
									setEnhancementApiConfigId(value)
									vscode.postMessage({
										type: "enhancementApiConfigId",
										text: value
									})
								}}
								style={{ width: "300px" }}
							>
								<VSCodeOption value="">Use currently selected API configuration</VSCodeOption>
								{(listApiConfigMeta || []).map((config) => (
									<VSCodeOption key={config.id} value={config.id}>
										{config.name}
									</VSCodeOption>
								))}
							</VSCodeDropdown>
						</div>

						<div style={{ marginBottom: "8px" }}>
							<div style={{ display: "flex", justifyContent: "space-between", alignItems: "center", marginBottom: "4px" }}>
								<div style={{ fontWeight: "bold" }}>Enhancement Prompt</div>
								<div style={{ display: "flex", gap: "8px" }}>
									<VSCodeButton appearance="icon" onClick={handleEnhanceReset} title="Revert to default">
										<span className="codicon codicon-discard"></span>
									</VSCodeButton>
								</div>
							</div>
							<div style={{ fontSize: "13px", color: "var(--vscode-descriptionForeground)", marginBottom: "8px" }}>
								This prompt will be used to refine your input when you hit the sparkle icon in chat.
							</div>
						</div>
						<VSCodeTextArea
							value={getEnhancePromptValue()}
							onChange={handleEnhancePromptChange}
							rows={4}
							resize="vertical"
							style={{ width: "100%" }}
						/>

						<div style={{ marginTop: "12px" }}>
							<VSCodeTextArea
								value={testPrompt}
								onChange={(e) => setTestPrompt((e.target as HTMLTextAreaElement).value)}
								placeholder="Enter a prompt to test the enhancement"
								rows={3}
								resize="vertical"
								style={{ width: "100%" }}
								data-testid="test-prompt-textarea"
							/>
							<div style={{
								marginTop: "8px",
								display: "flex",
								justifyContent: "flex-start",
								alignItems: "center",
								gap: 8
							}}>
								<VSCodeButton
									onClick={handleTestEnhancement}
									disabled={isEnhancing}
									appearance="primary"
								>
									Preview Prompt Enhancement
								</VSCodeButton>
							</div>
						</div>
					</div>
				</div>

				{/* Bottom padding */}
				<div style={{ height: "20px" }} />
			</div>

			{isDialogOpen && (
				<div style={{
					position: 'fixed',
					inset: 0,
					display: 'flex',
					justifyContent: 'flex-end',
					backgroundColor: 'rgba(0, 0, 0, 0.5)',
					zIndex: 1000
				}}>
					<div style={{
						width: 'calc(100vw - 100px)',
						height: '100%',
						backgroundColor: 'var(--vscode-editor-background)',
						boxShadow: '-2px 0 5px rgba(0, 0, 0, 0.2)',
						display: 'flex',
						flexDirection: 'column',
						position: 'relative'
					}}>
						<div style={{
							flex: 1,
							padding: '20px',
							overflowY: 'auto',
							minHeight: 0
						}}>
							<VSCodeButton
								appearance="icon"
								onClick={() => setIsDialogOpen(false)}
								style={{
									position: 'absolute',
									top: '20px',
									right: '20px'
								}}
							>
								<span className="codicon codicon-close"></span>
							</VSCodeButton>
							<h2 style={{ margin: '0 0 16px' }}>{selectedPromptTitle}</h2>
							<pre style={{
								padding: '8px',
								whiteSpace: 'pre-wrap',
								wordBreak: 'break-word',
								fontFamily: 'var(--vscode-editor-font-family)',
								fontSize: 'var(--vscode-editor-font-size)',
								color: 'var(--vscode-editor-foreground)',
								backgroundColor: 'var(--vscode-editor-background)',
								border: '1px solid var(--vscode-editor-lineHighlightBorder)',
								borderRadius: '4px',
								overflowY: 'auto'
							}}>
								{selectedPromptContent}
							</pre>
						</div>
						<div style={{
							display: 'flex',
							justifyContent: 'flex-end',
							padding: '12px 20px',
							borderTop: '1px solid var(--vscode-editor-lineHighlightBorder)',
							backgroundColor: 'var(--vscode-editor-background)'
						}}>
							<VSCodeButton onClick={() => setIsDialogOpen(false)}>
								Close
							</VSCodeButton>
						</div>
					</div>
				</div>
			)}
		</div>
	)
}

export default PromptsView
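PromptsView reads and writes `customPrompts` keyed by mode and falls back to `defaultPrompts` whenever a field is unset. The real `PromptComponent` and `CustomPrompts` types are imported from `src/shared/modes` and are not part of this diff; the sketch below only records the shape that this component's usage implies, so treat every name in it as an inferred approximation.

```typescript
// Inferred from usage in PromptsView; an approximation, not the actual
// definitions in src/shared/modes.
type AgentMode = "code" | "architect" | "ask"

interface PromptComponent {
	roleDefinition?: string
	customInstructions?: string
}

type CustomPrompts = Partial<Record<AgentMode, PromptComponent>> & {
	// Read by getEnhancePromptValue() as the prompt-enhancement template.
	enhance?: string
}
```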
134 webview-ui/src/components/prompts/__tests__/PromptsView.test.tsx Normal file
@@ -0,0 +1,134 @@
import { render, screen, fireEvent } from '@testing-library/react'
import '@testing-library/jest-dom'
import PromptsView from '../PromptsView'
import { ExtensionStateContext } from '../../../context/ExtensionStateContext'
import { vscode } from '../../../utils/vscode'

// Mock vscode API
jest.mock('../../../utils/vscode', () => ({
	vscode: {
		postMessage: jest.fn()
	}
}))

const mockExtensionState = {
	customPrompts: {},
	listApiConfigMeta: [
		{ id: 'config1', name: 'Config 1' },
		{ id: 'config2', name: 'Config 2' }
	],
	enhancementApiConfigId: '',
	setEnhancementApiConfigId: jest.fn(),
	mode: 'code'
}

const renderPromptsView = (props = {}) => {
	const mockOnDone = jest.fn()
	return render(
		<ExtensionStateContext.Provider value={{ ...mockExtensionState, ...props } as any}>
			<PromptsView onDone={mockOnDone} />
		</ExtensionStateContext.Provider>
	)
}

describe('PromptsView', () => {
	beforeEach(() => {
		jest.clearAllMocks()
	})

	it('renders all mode tabs', () => {
		renderPromptsView()
		expect(screen.getByTestId('code-tab')).toBeInTheDocument()
		expect(screen.getByTestId('ask-tab')).toBeInTheDocument()
		expect(screen.getByTestId('architect-tab')).toBeInTheDocument()
	})

	it('defaults to current mode as active tab', () => {
		renderPromptsView({ mode: 'ask' })

		const codeTab = screen.getByTestId('code-tab')
		const askTab = screen.getByTestId('ask-tab')
		const architectTab = screen.getByTestId('architect-tab')

		expect(askTab).toHaveAttribute('data-active', 'true')
		expect(codeTab).toHaveAttribute('data-active', 'false')
		expect(architectTab).toHaveAttribute('data-active', 'false')
	})

	it('switches between tabs correctly', () => {
		renderPromptsView({ mode: 'code' })

		const codeTab = screen.getByTestId('code-tab')
		const askTab = screen.getByTestId('ask-tab')
		const architectTab = screen.getByTestId('architect-tab')

		// Initial state matches current mode (code)
		expect(codeTab).toHaveAttribute('data-active', 'true')
		expect(askTab).toHaveAttribute('data-active', 'false')
		expect(architectTab).toHaveAttribute('data-active', 'false')
		expect(architectTab).toHaveAttribute('data-active', 'false')

		// Click Ask tab
		fireEvent.click(askTab)
		expect(askTab).toHaveAttribute('data-active', 'true')
		expect(codeTab).toHaveAttribute('data-active', 'false')
		expect(architectTab).toHaveAttribute('data-active', 'false')

		// Click Architect tab
		fireEvent.click(architectTab)
		expect(architectTab).toHaveAttribute('data-active', 'true')
		expect(askTab).toHaveAttribute('data-active', 'false')
		expect(codeTab).toHaveAttribute('data-active', 'false')
	})

	it('handles prompt changes correctly', () => {
		renderPromptsView()

		const textarea = screen.getByTestId('code-prompt-textarea')
		fireEvent(textarea, new CustomEvent('change', {
			detail: {
				target: {
					value: 'New prompt value'
				}
			}
		}))

		expect(vscode.postMessage).toHaveBeenCalledWith({
			type: 'updatePrompt',
			promptMode: 'code',
			customPrompt: { roleDefinition: 'New prompt value' }
		})
	})

	it('resets prompt to default value', () => {
		renderPromptsView()

		const resetButton = screen.getByTestId('reset-prompt-button')
		fireEvent.click(resetButton)

		expect(vscode.postMessage).toHaveBeenCalledWith({
			type: 'updatePrompt',
			promptMode: 'code',
			customPrompt: { roleDefinition: undefined }
		})
	})

	it('handles API configuration selection', () => {
		renderPromptsView()

		const dropdown = screen.getByTestId('api-config-dropdown')
		fireEvent(dropdown, new CustomEvent('change', {
			detail: {
				target: {
					value: 'config1'
				}
			}
		}))

		expect(mockExtensionState.setEnhancementApiConfigId).toHaveBeenCalledWith('config1')
		expect(vscode.postMessage).toHaveBeenCalledWith({
			type: 'enhancementApiConfigId',
			text: 'config1'
		})
	})
})
@@ -278,24 +278,26 @@ const SettingsView = ({ onDone }: SettingsViewProps) => {
 							</p>
 						</div>

-						<VSCodeTextArea
-							value={customInstructions ?? ""}
-							style={{ width: "100%" }}
-							rows={4}
-							placeholder={
-								'e.g. "Run unit tests at the end", "Use TypeScript with async/await", "Speak in Spanish"'
-							}
-							onInput={(e: any) => setCustomInstructions(e.target?.value ?? "")}>
+						<div style={{ marginBottom: 15 }}>
 							<span style={{ fontWeight: "500" }}>Custom Instructions</span>
-						</VSCodeTextArea>
-						<p
-							style={{
-								fontSize: "12px",
-								marginTop: "5px",
-								color: "var(--vscode-descriptionForeground)",
-							}}>
-							These instructions are added to the end of the system prompt sent with every request. Custom instructions set in .clinerules and .cursorrules in the working directory are also included.
-						</p>
+							<VSCodeTextArea
+								value={customInstructions ?? ""}
+								style={{ width: "100%" }}
+								rows={4}
+								placeholder={
+									'e.g. "Run unit tests at the end", "Use TypeScript with async/await", "Speak in Spanish"'
+								}
+								onInput={(e: any) => setCustomInstructions(e.target?.value ?? "")}
+							/>
+							<p
+								style={{
+									fontSize: "12px",
+									marginTop: "5px",
+									color: "var(--vscode-descriptionForeground)",
+								}}>
+								These instructions are added to the end of the system prompt sent with every request. Custom instructions set in .clinerules in the working directory are also included. For mode-specific instructions, use the <span className="codicon codicon-notebook" style={{ fontSize: "10px" }}></span> Prompts tab in the top menu.
+							</p>
+						</div>

 						<McpEnabledToggle />
 					</div>
@@ -17,7 +17,7 @@ import {
 	checkExistKey
 } from "../../../src/shared/checkExistApiConfig"
 import { Mode } from "../../../src/core/prompts/types"
-import { codeMode } from "../../../src/shared/modes"
+import { codeMode, CustomPrompts, defaultPrompts } from "../../../src/shared/modes"

 export interface ExtensionStateContextType extends ExtensionState {
 	didHydrateState: boolean
@@ -60,6 +60,9 @@ export interface ExtensionStateContextType extends ExtensionState {
 	onUpdateApiConfig: (apiConfig: ApiConfiguration) => void
 	mode: Mode
 	setMode: (value: Mode) => void
+	setCustomPrompts: (value: CustomPrompts) => void
+	enhancementApiConfigId?: string
+	setEnhancementApiConfigId: (value: string) => void
 }

 export const ExtensionStateContext = createContext<ExtensionStateContextType | undefined>(undefined)
@@ -86,6 +89,8 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
 		currentApiConfigName: 'default',
 		listApiConfigMeta: [],
 		mode: codeMode,
+		customPrompts: defaultPrompts,
+		enhancementApiConfigId: '',
 	})
 	const [didHydrateState, setDidHydrateState] = useState(false)
 	const [showWelcome, setShowWelcome] = useState(false)
@@ -230,6 +235,8 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
 		setListApiConfigMeta,
 		onUpdateApiConfig,
 		setMode: (value: Mode) => setState((prevState) => ({ ...prevState, mode: value })),
+		setCustomPrompts: (value) => setState((prevState) => ({ ...prevState, customPrompts: value })),
+		setEnhancementApiConfigId: (value) => setState((prevState) => ({ ...prevState, enhancementApiConfigId: value })),
 	}

 	return <ExtensionStateContext.Provider value={contextValue}>{children}</ExtensionStateContext.Provider>
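With these context additions, any webview component can read and update the new prompt-related state through `useExtensionState()`, which the existing tests above already import from `ExtensionStateContext`. A minimal usage sketch follows; the component name and import path are invented for illustration and the real callers are ChatTextArea and PromptsView shown earlier in this diff.

```tsx
// Hypothetical consumer; "ResetEnhanceConfigButton" does not exist in the
// codebase. It only demonstrates the context fields added in this diff.
// The import path assumes a file placed directly under webview-ui/src/.
import React from "react"
import { useExtensionState } from "./context/ExtensionStateContext"

const ResetEnhanceConfigButton = () => {
	const { enhancementApiConfigId, setEnhancementApiConfigId } = useExtensionState()

	return (
		<button
			disabled={!enhancementApiConfigId}
			onClick={() => setEnhancementApiConfigId("")}>
			Use currently selected API configuration
		</button>
	)
}

export default ResetEnhanceConfigButton
```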