Merge remote-tracking branch 'origin/main' into vscode-lm-provider

Matt Rubens
2025-01-15 01:37:37 -05:00
103 changed files with 14677 additions and 2160 deletions

@@ -0,0 +1,239 @@
import { AnthropicHandler } from '../anthropic';
import { ApiHandlerOptions } from '../../../shared/api';
import { ApiStream } from '../../transform/stream';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock Anthropic client
const mockBetaCreate = jest.fn();
const mockCreate = jest.fn();
jest.mock('@anthropic-ai/sdk', () => {
return {
Anthropic: jest.fn().mockImplementation(() => ({
beta: {
promptCaching: {
messages: {
create: mockBetaCreate.mockImplementation(async () => ({
async *[Symbol.asyncIterator]() {
yield {
type: 'message_start',
message: {
usage: {
input_tokens: 100,
output_tokens: 50,
cache_creation_input_tokens: 20,
cache_read_input_tokens: 10
}
}
};
yield {
type: 'content_block_start',
index: 0,
content_block: {
type: 'text',
text: 'Hello'
}
};
yield {
type: 'content_block_delta',
delta: {
type: 'text_delta',
text: ' world'
}
};
}
}))
}
}
},
messages: {
create: mockCreate.mockImplementation(async (options) => {
if (!options.stream) {
return {
id: 'test-completion',
content: [
{ type: 'text', text: 'Test response' }
],
role: 'assistant',
model: options.model,
usage: {
input_tokens: 10,
output_tokens: 5
}
}
}
return {
async *[Symbol.asyncIterator]() {
yield {
type: 'message_start',
message: {
usage: {
input_tokens: 10,
output_tokens: 5
}
}
}
yield {
type: 'content_block_start',
content_block: {
type: 'text',
text: 'Test response'
}
}
}
}
})
}
}))
};
});
describe('AnthropicHandler', () => {
let handler: AnthropicHandler;
let mockOptions: ApiHandlerOptions;
beforeEach(() => {
mockOptions = {
apiKey: 'test-api-key',
apiModelId: 'claude-3-5-sonnet-20241022'
};
handler = new AnthropicHandler(mockOptions);
mockBetaCreate.mockClear();
mockCreate.mockClear();
});
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeInstanceOf(AnthropicHandler);
expect(handler.getModel().id).toBe(mockOptions.apiModelId);
});
it('should initialize with undefined API key', () => {
// The SDK will handle API key validation, so we just verify it initializes
const handlerWithoutKey = new AnthropicHandler({
...mockOptions,
apiKey: undefined
});
expect(handlerWithoutKey).toBeInstanceOf(AnthropicHandler);
});
it('should use custom base URL if provided', () => {
const customBaseUrl = 'https://custom.anthropic.com';
const handlerWithCustomUrl = new AnthropicHandler({
...mockOptions,
anthropicBaseUrl: customBaseUrl
});
expect(handlerWithCustomUrl).toBeInstanceOf(AnthropicHandler);
});
});
describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: [{
type: 'text' as const,
text: 'Hello!'
}]
}
];
it('should handle prompt caching for supported models', async () => {
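// Cache-capable Claude models are expected to go through the beta prompt-caching endpoint, with cache read/write token counts surfaced in the usage chunk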
const stream = handler.createMessage(systemPrompt, [
{
role: 'user',
content: [{ type: 'text' as const, text: 'First message' }]
},
{
role: 'assistant',
content: [{ type: 'text' as const, text: 'Response' }]
},
{
role: 'user',
content: [{ type: 'text' as const, text: 'Second message' }]
}
]);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
// Verify usage information
const usageChunk = chunks.find(chunk => chunk.type === 'usage');
expect(usageChunk).toBeDefined();
expect(usageChunk?.inputTokens).toBe(100);
expect(usageChunk?.outputTokens).toBe(50);
expect(usageChunk?.cacheWriteTokens).toBe(20);
expect(usageChunk?.cacheReadTokens).toBe(10);
// Verify text content
const textChunks = chunks.filter(chunk => chunk.type === 'text');
expect(textChunks).toHaveLength(2);
expect(textChunks[0].text).toBe('Hello');
expect(textChunks[1].text).toBe(' world');
// Verify beta API was used
expect(mockBetaCreate).toHaveBeenCalled();
expect(mockCreate).not.toHaveBeenCalled();
});
});
describe('completePrompt', () => {
it('should complete prompt successfully', async () => {
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith({
model: mockOptions.apiModelId,
messages: [{ role: 'user', content: 'Test prompt' }],
max_tokens: 8192,
temperature: 0,
stream: false
});
});
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('Anthropic completion error: API Error');
});
it('should handle non-text content', async () => {
mockCreate.mockImplementationOnce(async () => ({
content: [{ type: 'image' }]
}));
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
it('should handle empty response', async () => {
mockCreate.mockImplementationOnce(async () => ({
content: [{ type: 'text', text: '' }]
}));
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
});
describe('getModel', () => {
it('should return default model if no model ID is provided', () => {
const handlerWithoutModel = new AnthropicHandler({
...mockOptions,
apiModelId: undefined
});
const model = handlerWithoutModel.getModel();
expect(model.id).toBeDefined();
expect(model.info).toBeDefined();
});
it('should return specified model if valid model ID is provided', () => {
const model = handler.getModel();
expect(model.id).toBe(mockOptions.apiModelId);
expect(model.info).toBeDefined();
expect(model.info.maxTokens).toBe(8192);
expect(model.info.contextWindow).toBe(200_000);
expect(model.info.supportsImages).toBe(true);
expect(model.info.supportsPromptCache).toBe(true);
});
});
});

@@ -1,191 +1,246 @@
import { AwsBedrockHandler } from '../bedrock'
import { ApiHandlerOptions, ModelInfo } from '../../../shared/api'
import { Anthropic } from '@anthropic-ai/sdk'
import { StreamEvent } from '../bedrock'
// Simplified mock for BedrockRuntimeClient
class MockBedrockRuntimeClient {
private _region: string
private mockStream: StreamEvent[] = []
constructor(config: { region: string }) {
this._region = config.region
}
async send(command: any): Promise<{ stream: AsyncIterableIterator<StreamEvent> }> {
return {
stream: this.createMockStream()
}
}
private createMockStream(): AsyncIterableIterator<StreamEvent> {
const self = this;
return {
async *[Symbol.asyncIterator]() {
for (const event of self.mockStream) {
yield event;
}
},
next: async () => {
const value = this.mockStream.shift();
return value ? { value, done: false } : { value: undefined, done: true };
},
return: async () => ({ value: undefined, done: true }),
throw: async (e) => { throw e; }
};
}
setMockStream(stream: StreamEvent[]) {
this.mockStream = stream;
}
get config() {
return { region: this._region };
}
}
import { AwsBedrockHandler } from '../bedrock';
import { MessageContent } from '../../../shared/api';
import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
import { Anthropic } from '@anthropic-ai/sdk';
describe('AwsBedrockHandler', () => {
const mockOptions: ApiHandlerOptions = {
awsRegion: 'us-east-1',
awsAccessKey: 'mock-access-key',
awsSecretKey: 'mock-secret-key',
apiModelId: 'anthropic.claude-v2',
}
let handler: AwsBedrockHandler;
// Override the BedrockRuntimeClient creation in the constructor
class TestAwsBedrockHandler extends AwsBedrockHandler {
constructor(options: ApiHandlerOptions, mockClient?: MockBedrockRuntimeClient) {
super(options)
if (mockClient) {
// Force type casting to bypass strict type checking
(this as any)['client'] = mockClient
}
}
}
beforeEach(() => {
handler = new AwsBedrockHandler({
apiModelId: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
awsAccessKey: 'test-access-key',
awsSecretKey: 'test-secret-key',
awsRegion: 'us-east-1'
});
});
test('constructor initializes with correct AWS credentials', () => {
const mockClient = new MockBedrockRuntimeClient({
region: 'us-east-1'
})
describe('constructor', () => {
it('should initialize with provided config', () => {
expect(handler['options'].awsAccessKey).toBe('test-access-key');
expect(handler['options'].awsSecretKey).toBe('test-secret-key');
expect(handler['options'].awsRegion).toBe('us-east-1');
expect(handler['options'].apiModelId).toBe('anthropic.claude-3-5-sonnet-20241022-v2:0');
});
const handler = new TestAwsBedrockHandler(mockOptions, mockClient)
// Verify that the client is created with the correct configuration
expect(handler['client']).toBeDefined()
expect(handler['client'].config.region).toBe('us-east-1')
})
it('should initialize with missing AWS credentials', () => {
const handlerWithoutCreds = new AwsBedrockHandler({
apiModelId: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
awsRegion: 'us-east-1'
});
expect(handlerWithoutCreds).toBeInstanceOf(AwsBedrockHandler);
});
});
test('getModel returns correct model info', () => {
const mockClient = new MockBedrockRuntimeClient({
region: 'us-east-1'
})
const handler = new TestAwsBedrockHandler(mockOptions, mockClient)
const result = handler.getModel()
expect(result).toEqual({
id: 'anthropic.claude-v2',
info: {
maxTokens: 5000,
contextWindow: 128_000,
supportsPromptCache: false
}
})
})
test('createMessage handles successful stream events', async () => {
const mockClient = new MockBedrockRuntimeClient({
region: 'us-east-1'
})
// Mock stream events
const mockStreamEvents: StreamEvent[] = [
describe('createMessage', () => {
const mockMessages: Anthropic.Messages.MessageParam[] = [
{
metadata: {
usage: {
inputTokens: 50,
outputTokens: 100
}
}
role: 'user',
content: 'Hello'
},
{
contentBlockStart: {
start: {
text: 'Hello'
}
}
},
{
contentBlockDelta: {
delta: {
text: ' world'
}
}
},
{
messageStop: {
stopReason: 'end_turn'
}
role: 'assistant',
content: 'Hi there!'
}
]
];
mockClient.setMockStream(mockStreamEvents)
const systemPrompt = 'You are a helpful assistant';
const handler = new TestAwsBedrockHandler(mockOptions, mockClient)
it('should handle text messages correctly', async () => {
const mockResponse = {
messages: [{
role: 'assistant',
content: [{ type: 'text', text: 'Hello! How can I help you?' }]
}],
usage: {
input_tokens: 10,
output_tokens: 5
}
};
const systemPrompt = 'You are a helpful assistant'
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'Say hello' }
]
// Mock AWS SDK invoke
const mockStream = {
[Symbol.asyncIterator]: async function* () {
yield {
metadata: {
usage: {
inputTokens: 10,
outputTokens: 5
}
}
};
}
};
const generator = handler.createMessage(systemPrompt, messages)
const chunks = []
const mockInvoke = jest.fn().mockResolvedValue({
stream: mockStream
});
for await (const chunk of generator) {
chunks.push(chunk)
}
handler['client'] = {
send: mockInvoke
} as unknown as BedrockRuntimeClient;
// Verify the chunks match expected stream events
expect(chunks).toHaveLength(3)
expect(chunks[0]).toEqual({
type: 'usage',
inputTokens: 50,
outputTokens: 100
})
expect(chunks[1]).toEqual({
type: 'text',
text: 'Hello'
})
expect(chunks[2]).toEqual({
type: 'text',
text: ' world'
})
})
test('createMessage handles error scenarios', async () => {
const mockClient = new MockBedrockRuntimeClient({
region: 'us-east-1'
})
// Simulate an error by overriding the send method
mockClient.send = () => {
throw new Error('API request failed')
}
const handler = new TestAwsBedrockHandler(mockOptions, mockClient)
const systemPrompt = 'You are a helpful assistant'
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'Cause an error' }
]
await expect(async () => {
const generator = handler.createMessage(systemPrompt, messages)
const chunks = []
const stream = handler.createMessage(systemPrompt, mockMessages);
const chunks = [];
for await (const chunk of generator) {
chunks.push(chunk)
for await (const chunk of stream) {
chunks.push(chunk);
}
}).rejects.toThrow('API request failed')
})
})
expect(chunks.length).toBeGreaterThan(0);
expect(chunks[0]).toEqual({
type: 'usage',
inputTokens: 10,
outputTokens: 5
});
expect(mockInvoke).toHaveBeenCalledWith(expect.objectContaining({
input: expect.objectContaining({
modelId: 'anthropic.claude-3-5-sonnet-20241022-v2:0'
})
}));
});
it('should handle API errors', async () => {
// Mock AWS SDK invoke with error
const mockInvoke = jest.fn().mockRejectedValue(new Error('AWS Bedrock error'));
handler['client'] = {
send: mockInvoke
} as unknown as BedrockRuntimeClient;
const stream = handler.createMessage(systemPrompt, mockMessages);
await expect(async () => {
for await (const chunk of stream) {
// Should throw before yielding any chunks
}
}).rejects.toThrow('AWS Bedrock error');
});
});
describe('completePrompt', () => {
it('should complete prompt successfully', async () => {
const mockResponse = {
output: new TextEncoder().encode(JSON.stringify({
content: 'Test response'
}))
};
const mockSend = jest.fn().mockResolvedValue(mockResponse);
handler['client'] = {
send: mockSend
} as unknown as BedrockRuntimeClient;
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockSend).toHaveBeenCalledWith(expect.objectContaining({
input: expect.objectContaining({
modelId: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
messages: expect.arrayContaining([
expect.objectContaining({
role: 'user',
content: [{ text: 'Test prompt' }]
})
]),
inferenceConfig: expect.objectContaining({
maxTokens: 5000,
temperature: 0.3,
topP: 0.1
})
})
}));
});
it('should handle API errors', async () => {
const mockError = new Error('AWS Bedrock error');
const mockSend = jest.fn().mockRejectedValue(mockError);
handler['client'] = {
send: mockSend
} as unknown as BedrockRuntimeClient;
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('Bedrock completion error: AWS Bedrock error');
});
it('should handle invalid response format', async () => {
const mockResponse = {
output: new TextEncoder().encode('invalid json')
};
const mockSend = jest.fn().mockResolvedValue(mockResponse);
handler['client'] = {
send: mockSend
} as unknown as BedrockRuntimeClient;
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
it('should handle empty response', async () => {
const mockResponse = {
output: new TextEncoder().encode(JSON.stringify({}))
};
const mockSend = jest.fn().mockResolvedValue(mockResponse);
handler['client'] = {
send: mockSend
} as unknown as BedrockRuntimeClient;
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
it('should handle cross-region inference', async () => {
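// With awsUseCrossRegionInference enabled, the model ID sent to Bedrock is expected to gain a region-group prefix (here 'us.' for us-east-1)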
handler = new AwsBedrockHandler({
apiModelId: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
awsAccessKey: 'test-access-key',
awsSecretKey: 'test-secret-key',
awsRegion: 'us-east-1',
awsUseCrossRegionInference: true
});
const mockResponse = {
output: new TextEncoder().encode(JSON.stringify({
content: 'Test response'
}))
};
const mockSend = jest.fn().mockResolvedValue(mockResponse);
handler['client'] = {
send: mockSend
} as unknown as BedrockRuntimeClient;
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockSend).toHaveBeenCalledWith(expect.objectContaining({
input: expect.objectContaining({
modelId: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0'
})
}));
});
});
describe('getModel', () => {
it('should return correct model info in test environment', () => {
const modelInfo = handler.getModel();
expect(modelInfo.id).toBe('anthropic.claude-3-5-sonnet-20241022-v2:0');
expect(modelInfo.info).toBeDefined();
expect(modelInfo.info.maxTokens).toBe(5000); // Test environment value
expect(modelInfo.info.contextWindow).toBe(128_000); // Test environment value
});
it('should return test model info for invalid model in test environment', () => {
const invalidHandler = new AwsBedrockHandler({
apiModelId: 'invalid-model',
awsAccessKey: 'test-access-key',
awsSecretKey: 'test-secret-key',
awsRegion: 'us-east-1'
});
const modelInfo = invalidHandler.getModel();
expect(modelInfo.id).toBe('invalid-model'); // In test env, returns whatever is passed
expect(modelInfo.info.maxTokens).toBe(5000);
expect(modelInfo.info.contextWindow).toBe(128_000);
});
});
});

@@ -1,167 +1,203 @@
import { DeepSeekHandler } from '../deepseek'
import { ApiHandlerOptions } from '../../../shared/api'
import OpenAI from 'openai'
import { Anthropic } from '@anthropic-ai/sdk'
import { DeepSeekHandler } from '../deepseek';
import { ApiHandlerOptions, deepSeekDefaultModelId } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock dependencies
jest.mock('openai')
// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
return {
__esModule: true,
default: jest.fn().mockImplementation(() => ({
chat: {
completions: {
create: mockCreate.mockImplementation(async (options) => {
if (!options.stream) {
return {
id: 'test-completion',
choices: [{
message: { role: 'assistant', content: 'Test response', refusal: null },
finish_reason: 'stop',
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
// Return async iterator for streaming
return {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{
delta: { content: 'Test response' },
index: 0
}],
usage: null
};
yield {
choices: [{
delta: {},
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
};
})
}
}
}))
};
});
describe('DeepSeekHandler', () => {
const mockOptions: ApiHandlerOptions = {
deepSeekApiKey: 'test-key',
deepSeekModelId: 'deepseek-chat',
}
let handler: DeepSeekHandler;
let mockOptions: ApiHandlerOptions;
beforeEach(() => {
jest.clearAllMocks()
})
mockOptions = {
deepSeekApiKey: 'test-api-key',
deepSeekModelId: 'deepseek-chat',
deepSeekBaseUrl: 'https://api.deepseek.com/v1'
};
handler = new DeepSeekHandler(mockOptions);
mockCreate.mockClear();
});
test('constructor initializes with correct options', () => {
const handler = new DeepSeekHandler(mockOptions)
expect(handler).toBeInstanceOf(DeepSeekHandler)
expect(OpenAI).toHaveBeenCalledWith({
baseURL: 'https://api.deepseek.com/v1',
apiKey: mockOptions.deepSeekApiKey,
})
})
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeInstanceOf(DeepSeekHandler);
expect(handler.getModel().id).toBe(mockOptions.deepSeekModelId);
});
test('getModel returns correct model info', () => {
const handler = new DeepSeekHandler(mockOptions)
const result = handler.getModel()
expect(result).toEqual({
id: mockOptions.deepSeekModelId,
info: expect.objectContaining({
maxTokens: 8192,
contextWindow: 64000,
supportsPromptCache: false,
supportsImages: false,
inputPrice: 0.014,
outputPrice: 0.28,
})
})
})
it('should throw error if API key is missing', () => {
expect(() => {
new DeepSeekHandler({
...mockOptions,
deepSeekApiKey: undefined
});
}).toThrow('DeepSeek API key is required');
});
test('getModel returns default model info when no model specified', () => {
const handler = new DeepSeekHandler({ deepSeekApiKey: 'test-key' })
const result = handler.getModel()
expect(result.id).toBe('deepseek-chat')
expect(result.info.maxTokens).toBe(8192)
})
it('should use default model ID if not provided', () => {
const handlerWithoutModel = new DeepSeekHandler({
...mockOptions,
deepSeekModelId: undefined
});
expect(handlerWithoutModel.getModel().id).toBe(deepSeekDefaultModelId);
});
test('createMessage handles string content correctly', async () => {
const handler = new DeepSeekHandler(mockOptions)
const mockStream = {
async *[Symbol.asyncIterator]() {
yield {
choices: [{
delta: {
content: 'test response'
}
}]
}
}
}
it('should use default base URL if not provided', () => {
const handlerWithoutBaseUrl = new DeepSeekHandler({
...mockOptions,
deepSeekBaseUrl: undefined
});
expect(handlerWithoutBaseUrl).toBeInstanceOf(DeepSeekHandler);
// The base URL is passed to OpenAI client internally
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({
baseURL: 'https://api.deepseek.com/v1'
}));
});
const mockCreate = jest.fn().mockResolvedValue(mockStream)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
it('should use custom base URL if provided', () => {
const customBaseUrl = 'https://custom.deepseek.com/v1';
const handlerWithCustomUrl = new DeepSeekHandler({
...mockOptions,
deepSeekBaseUrl: customBaseUrl
});
expect(handlerWithCustomUrl).toBeInstanceOf(DeepSeekHandler);
// The custom base URL is passed to OpenAI client
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({
baseURL: customBaseUrl
}));
});
const systemPrompt = 'test system prompt'
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'test message' }
]
it('should set includeMaxTokens to true', () => {
// Create a new handler and verify OpenAI client was called with includeMaxTokens
new DeepSeekHandler(mockOptions);
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({
apiKey: mockOptions.deepSeekApiKey
}));
});
});
const generator = handler.createMessage(systemPrompt, messages)
const chunks = []
for await (const chunk of generator) {
chunks.push(chunk)
}
describe('getModel', () => {
it('should return model info for valid model ID', () => {
const model = handler.getModel();
expect(model.id).toBe(mockOptions.deepSeekModelId);
expect(model.info).toBeDefined();
expect(model.info.maxTokens).toBe(8192);
expect(model.info.contextWindow).toBe(64_000);
expect(model.info.supportsImages).toBe(false);
expect(model.info.supportsPromptCache).toBe(false);
});
expect(chunks).toHaveLength(1)
expect(chunks[0]).toEqual({
type: 'text',
text: 'test response'
})
it('should return provided model ID with default model info if model does not exist', () => {
const handlerWithInvalidModel = new DeepSeekHandler({
...mockOptions,
deepSeekModelId: 'invalid-model'
});
const model = handlerWithInvalidModel.getModel();
expect(model.id).toBe('invalid-model'); // Returns provided ID
expect(model.info).toBeDefined();
expect(model.info).toBe(handler.getModel().info); // But uses default model info
});
expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
model: mockOptions.deepSeekModelId,
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: 'test message' }
],
temperature: 0,
stream: true,
max_tokens: 8192,
stream_options: { include_usage: true }
}))
})
it('should return default model if no model ID is provided', () => {
const handlerWithoutModel = new DeepSeekHandler({
...mockOptions,
deepSeekModelId: undefined
});
const model = handlerWithoutModel.getModel();
expect(model.id).toBe(deepSeekDefaultModelId);
expect(model.info).toBeDefined();
});
});
test('createMessage handles complex content correctly', async () => {
const handler = new DeepSeekHandler(mockOptions)
const mockStream = {
async *[Symbol.asyncIterator]() {
yield {
choices: [{
delta: {
content: 'test response'
}
}]
}
}
}
const mockCreate = jest.fn().mockResolvedValue(mockStream)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
const systemPrompt = 'test system prompt'
describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: [
{ type: 'text', text: 'part 1' },
{ type: 'text', text: 'part 2' }
]
content: [{
type: 'text' as const,
text: 'Hello!'
}]
}
]
];
const generator = handler.createMessage(systemPrompt, messages)
await generator.next()
expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
messages: [
{ role: 'system', content: systemPrompt },
{
role: 'user',
content: [
{ type: 'text', text: 'part 1' },
{ type: 'text', text: 'part 2' }
]
}
]
}))
})
test('createMessage handles API errors', async () => {
const handler = new DeepSeekHandler(mockOptions)
const mockStream = {
async *[Symbol.asyncIterator]() {
throw new Error('API Error')
it('should handle streaming responses', async () => {
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
}
const mockCreate = jest.fn().mockResolvedValue(mockStream)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
expect(chunks.length).toBeGreaterThan(0);
const textChunks = chunks.filter(chunk => chunk.type === 'text');
expect(textChunks).toHaveLength(1);
expect(textChunks[0].text).toBe('Test response');
});
const generator = handler.createMessage('test', [])
await expect(generator.next()).rejects.toThrow('API Error')
})
})
it('should include usage information', async () => {
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
const usageChunks = chunks.filter(chunk => chunk.type === 'usage');
expect(usageChunks.length).toBeGreaterThan(0);
expect(usageChunks[0].inputTokens).toBe(10);
expect(usageChunks[0].outputTokens).toBe(5);
});
});
});

@@ -0,0 +1,212 @@
import { GeminiHandler } from '../gemini';
import { Anthropic } from '@anthropic-ai/sdk';
import { GoogleGenerativeAI } from '@google/generative-ai';
// Mock the Google Generative AI SDK
jest.mock('@google/generative-ai', () => ({
GoogleGenerativeAI: jest.fn().mockImplementation(() => ({
getGenerativeModel: jest.fn().mockReturnValue({
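// No default stream behaviour here; each test installs its own generateContentStream mock on handler['client']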
generateContentStream: jest.fn(),
generateContent: jest.fn().mockResolvedValue({
response: {
text: () => 'Test response'
}
})
})
}))
}));
describe('GeminiHandler', () => {
let handler: GeminiHandler;
beforeEach(() => {
handler = new GeminiHandler({
apiKey: 'test-key',
apiModelId: 'gemini-2.0-flash-thinking-exp-1219',
geminiApiKey: 'test-key'
});
});
describe('constructor', () => {
it('should initialize with provided config', () => {
expect(handler['options'].geminiApiKey).toBe('test-key');
expect(handler['options'].apiModelId).toBe('gemini-2.0-flash-thinking-exp-1219');
});
it('should throw if API key is missing', () => {
expect(() => {
new GeminiHandler({
apiModelId: 'gemini-2.0-flash-thinking-exp-1219',
geminiApiKey: ''
});
}).toThrow('API key is required for Google Gemini');
});
});
describe('createMessage', () => {
const mockMessages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello'
},
{
role: 'assistant',
content: 'Hi there!'
}
];
const systemPrompt = 'You are a helpful assistant';
it('should handle text messages correctly', async () => {
// Mock the stream response
const mockStream = {
stream: [
{ text: () => 'Hello' },
{ text: () => ' world!' }
],
response: {
usageMetadata: {
promptTokenCount: 10,
candidatesTokenCount: 5
}
}
};
// Setup the mock implementation
const mockGenerateContentStream = jest.fn().mockResolvedValue(mockStream);
const mockGetGenerativeModel = jest.fn().mockReturnValue({
generateContentStream: mockGenerateContentStream
});
(handler['client'] as any).getGenerativeModel = mockGetGenerativeModel;
const stream = handler.createMessage(systemPrompt, mockMessages);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
// Should have 3 chunks: 'Hello', ' world!', and usage info
expect(chunks.length).toBe(3);
expect(chunks[0]).toEqual({
type: 'text',
text: 'Hello'
});
expect(chunks[1]).toEqual({
type: 'text',
text: ' world!'
});
expect(chunks[2]).toEqual({
type: 'usage',
inputTokens: 10,
outputTokens: 5
});
// Verify the model configuration
expect(mockGetGenerativeModel).toHaveBeenCalledWith({
model: 'gemini-2.0-flash-thinking-exp-1219',
systemInstruction: systemPrompt
});
// Verify generation config
expect(mockGenerateContentStream).toHaveBeenCalledWith(
expect.objectContaining({
generationConfig: {
temperature: 0
}
})
);
});
it('should handle API errors', async () => {
const mockError = new Error('Gemini API error');
const mockGenerateContentStream = jest.fn().mockRejectedValue(mockError);
const mockGetGenerativeModel = jest.fn().mockReturnValue({
generateContentStream: mockGenerateContentStream
});
(handler['client'] as any).getGenerativeModel = mockGetGenerativeModel;
const stream = handler.createMessage(systemPrompt, mockMessages);
await expect(async () => {
for await (const chunk of stream) {
// Should throw before yielding any chunks
}
}).rejects.toThrow('Gemini API error');
});
});
describe('completePrompt', () => {
it('should complete prompt successfully', async () => {
const mockGenerateContent = jest.fn().mockResolvedValue({
response: {
text: () => 'Test response'
}
});
const mockGetGenerativeModel = jest.fn().mockReturnValue({
generateContent: mockGenerateContent
});
(handler['client'] as any).getGenerativeModel = mockGetGenerativeModel;
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockGetGenerativeModel).toHaveBeenCalledWith({
model: 'gemini-2.0-flash-thinking-exp-1219'
});
expect(mockGenerateContent).toHaveBeenCalledWith({
contents: [{ role: 'user', parts: [{ text: 'Test prompt' }] }],
generationConfig: {
temperature: 0
}
});
});
it('should handle API errors', async () => {
const mockError = new Error('Gemini API error');
const mockGenerateContent = jest.fn().mockRejectedValue(mockError);
const mockGetGenerativeModel = jest.fn().mockReturnValue({
generateContent: mockGenerateContent
});
(handler['client'] as any).getGenerativeModel = mockGetGenerativeModel;
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('Gemini completion error: Gemini API error');
});
it('should handle empty response', async () => {
const mockGenerateContent = jest.fn().mockResolvedValue({
response: {
text: () => ''
}
});
const mockGetGenerativeModel = jest.fn().mockReturnValue({
generateContent: mockGenerateContent
});
(handler['client'] as any).getGenerativeModel = mockGetGenerativeModel;
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
});
describe('getModel', () => {
it('should return correct model info', () => {
const modelInfo = handler.getModel();
expect(modelInfo.id).toBe('gemini-2.0-flash-thinking-exp-1219');
expect(modelInfo.info).toBeDefined();
expect(modelInfo.info.maxTokens).toBe(8192);
expect(modelInfo.info.contextWindow).toBe(32_767);
});
it('should return default model if invalid model specified', () => {
const invalidHandler = new GeminiHandler({
apiModelId: 'invalid-model',
geminiApiKey: 'test-key'
});
const modelInfo = invalidHandler.getModel();
expect(modelInfo.id).toBe('gemini-2.0-flash-thinking-exp-1219'); // Default model
});
});
});

@@ -0,0 +1,226 @@
import { GlamaHandler } from '../glama';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';
import axios from 'axios';
// Mock OpenAI client
const mockCreate = jest.fn();
const mockWithResponse = jest.fn();
jest.mock('openai', () => {
return {
__esModule: true,
default: jest.fn().mockImplementation(() => ({
chat: {
completions: {
create: (...args: any[]) => {
const stream = {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{
delta: { content: 'Test response' },
index: 0
}],
usage: null
};
yield {
choices: [{
delta: {},
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
};
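// Delegate to the mockCreate spy; for streaming calls, attach a withResponse() helper that resolves to the mock stream plus response headers, so the handler can read x-completion-request-id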
const result = mockCreate(...args);
if (args[0].stream) {
mockWithResponse.mockReturnValue(Promise.resolve({
data: stream,
response: {
headers: {
get: (name: string) => name === 'x-completion-request-id' ? 'test-request-id' : null
}
}
}));
result.withResponse = mockWithResponse;
}
return result;
}
}
}
}))
};
});
describe('GlamaHandler', () => {
let handler: GlamaHandler;
let mockOptions: ApiHandlerOptions;
beforeEach(() => {
mockOptions = {
apiModelId: 'anthropic/claude-3-5-sonnet',
glamaModelId: 'anthropic/claude-3-5-sonnet',
glamaApiKey: 'test-api-key'
};
handler = new GlamaHandler(mockOptions);
mockCreate.mockClear();
mockWithResponse.mockClear();
// Default mock implementation for non-streaming responses
mockCreate.mockResolvedValue({
id: 'test-completion',
choices: [{
message: { role: 'assistant', content: 'Test response' },
finish_reason: 'stop',
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
});
});
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeInstanceOf(GlamaHandler);
expect(handler.getModel().id).toBe(mockOptions.apiModelId);
});
});
describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello!'
}
];
it('should handle streaming responses', async () => {
// Mock axios for token usage request
const mockAxios = jest.spyOn(axios, 'get').mockResolvedValueOnce({
data: {
tokenUsage: {
promptTokens: 10,
completionTokens: 5,
cacheCreationInputTokens: 0,
cacheReadInputTokens: 0
},
totalCostUsd: "0.00"
}
});
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBe(2); // Text chunk and usage chunk
expect(chunks[0]).toEqual({
type: 'text',
text: 'Test response'
});
expect(chunks[1]).toEqual({
type: 'usage',
inputTokens: 10,
outputTokens: 5,
cacheWriteTokens: 0,
cacheReadTokens: 0,
totalCost: 0
});
mockAxios.mockRestore();
});
it('should handle API errors', async () => {
mockCreate.mockImplementationOnce(() => {
throw new Error('API Error');
});
const stream = handler.createMessage(systemPrompt, messages);
const chunks = [];
try {
for await (const chunk of stream) {
chunks.push(chunk);
}
fail('Expected error to be thrown');
} catch (error) {
expect(error).toBeInstanceOf(Error);
expect((error as Error).message).toBe('API Error');
}
});
});
describe('completePrompt', () => {
it('should complete prompt successfully', async () => {
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
model: mockOptions.apiModelId,
messages: [{ role: 'user', content: 'Test prompt' }],
temperature: 0,
max_tokens: 8192
}));
});
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('Glama completion error: API Error');
});
it('should handle empty response', async () => {
mockCreate.mockResolvedValueOnce({
choices: [{ message: { content: '' } }]
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
it('should not set max_tokens for non-Anthropic models', async () => {
// Reset mock to clear any previous calls
mockCreate.mockClear();
const nonAnthropicOptions = {
apiModelId: 'openai/gpt-4',
glamaModelId: 'openai/gpt-4',
glamaApiKey: 'test-key',
glamaModelInfo: {
maxTokens: 4096,
contextWindow: 8192,
supportsImages: true,
supportsPromptCache: false
}
};
const nonAnthropicHandler = new GlamaHandler(nonAnthropicOptions);
await nonAnthropicHandler.completePrompt('Test prompt');
expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
model: 'openai/gpt-4',
messages: [{ role: 'user', content: 'Test prompt' }],
temperature: 0
}));
expect(mockCreate.mock.calls[0][0]).not.toHaveProperty('max_tokens');
});
});
describe('getModel', () => {
it('should return model info', () => {
const modelInfo = handler.getModel();
expect(modelInfo.id).toBe(mockOptions.apiModelId);
expect(modelInfo.info).toBeDefined();
expect(modelInfo.info.maxTokens).toBe(8192);
expect(modelInfo.info.contextWindow).toBe(200_000);
});
});
});

@@ -0,0 +1,160 @@
import { LmStudioHandler } from '../lmstudio';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
return {
__esModule: true,
default: jest.fn().mockImplementation(() => ({
chat: {
completions: {
create: mockCreate.mockImplementation(async (options) => {
if (!options.stream) {
return {
id: 'test-completion',
choices: [{
message: { role: 'assistant', content: 'Test response' },
finish_reason: 'stop',
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
return {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{
delta: { content: 'Test response' },
index: 0
}],
usage: null
};
yield {
choices: [{
delta: {},
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
};
})
}
}
}))
};
});
describe('LmStudioHandler', () => {
let handler: LmStudioHandler;
let mockOptions: ApiHandlerOptions;
beforeEach(() => {
mockOptions = {
apiModelId: 'local-model',
lmStudioModelId: 'local-model',
lmStudioBaseUrl: 'http://localhost:1234/v1'
};
handler = new LmStudioHandler(mockOptions);
mockCreate.mockClear();
});
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeInstanceOf(LmStudioHandler);
expect(handler.getModel().id).toBe(mockOptions.lmStudioModelId);
});
it('should use default base URL if not provided', () => {
const handlerWithoutUrl = new LmStudioHandler({
apiModelId: 'local-model',
lmStudioModelId: 'local-model'
});
expect(handlerWithoutUrl).toBeInstanceOf(LmStudioHandler);
});
});
describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello!'
}
];
it('should handle streaming responses', async () => {
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(0);
const textChunks = chunks.filter(chunk => chunk.type === 'text');
expect(textChunks).toHaveLength(1);
expect(textChunks[0].text).toBe('Test response');
});
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
const stream = handler.createMessage(systemPrompt, messages);
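// The handler is expected to wrap raw errors in a user-facing LM Studio troubleshooting message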
await expect(async () => {
for await (const chunk of stream) {
// Should not reach here
}
}).rejects.toThrow('Please check the LM Studio developer logs to debug what went wrong');
});
});
describe('completePrompt', () => {
it('should complete prompt successfully', async () => {
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith({
model: mockOptions.lmStudioModelId,
messages: [{ role: 'user', content: 'Test prompt' }],
temperature: 0,
stream: false
});
});
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('Please check the LM Studio developer logs to debug what went wrong');
});
it('should handle empty response', async () => {
mockCreate.mockResolvedValueOnce({
choices: [{ message: { content: '' } }]
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
});
describe('getModel', () => {
it('should return model info', () => {
const modelInfo = handler.getModel();
expect(modelInfo.id).toBe(mockOptions.lmStudioModelId);
expect(modelInfo.info).toBeDefined();
expect(modelInfo.info.maxTokens).toBe(-1);
expect(modelInfo.info.contextWindow).toBe(128_000);
});
});
});

@@ -0,0 +1,160 @@
import { OllamaHandler } from '../ollama';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
return {
__esModule: true,
default: jest.fn().mockImplementation(() => ({
chat: {
completions: {
create: mockCreate.mockImplementation(async (options) => {
if (!options.stream) {
return {
id: 'test-completion',
choices: [{
message: { role: 'assistant', content: 'Test response' },
finish_reason: 'stop',
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
return {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{
delta: { content: 'Test response' },
index: 0
}],
usage: null
};
yield {
choices: [{
delta: {},
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
};
})
}
}
}))
};
});
describe('OllamaHandler', () => {
let handler: OllamaHandler;
let mockOptions: ApiHandlerOptions;
beforeEach(() => {
mockOptions = {
apiModelId: 'llama2',
ollamaModelId: 'llama2',
ollamaBaseUrl: 'http://localhost:11434/v1'
};
handler = new OllamaHandler(mockOptions);
mockCreate.mockClear();
});
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeInstanceOf(OllamaHandler);
expect(handler.getModel().id).toBe(mockOptions.ollamaModelId);
});
it('should use default base URL if not provided', () => {
const handlerWithoutUrl = new OllamaHandler({
apiModelId: 'llama2',
ollamaModelId: 'llama2'
});
expect(handlerWithoutUrl).toBeInstanceOf(OllamaHandler);
});
});
describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello!'
}
];
it('should handle streaming responses', async () => {
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(0);
const textChunks = chunks.filter(chunk => chunk.type === 'text');
expect(textChunks).toHaveLength(1);
expect(textChunks[0].text).toBe('Test response');
});
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
const stream = handler.createMessage(systemPrompt, messages);
await expect(async () => {
for await (const chunk of stream) {
// Should not reach here
}
}).rejects.toThrow('API Error');
});
});
describe('completePrompt', () => {
it('should complete prompt successfully', async () => {
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith({
model: mockOptions.ollamaModelId,
messages: [{ role: 'user', content: 'Test prompt' }],
temperature: 0,
stream: false
});
});
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('Ollama completion error: API Error');
});
it('should handle empty response', async () => {
mockCreate.mockResolvedValueOnce({
choices: [{ message: { content: '' } }]
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
});
describe('getModel', () => {
it('should return model info', () => {
const modelInfo = handler.getModel();
expect(modelInfo.id).toBe(mockOptions.ollamaModelId);
expect(modelInfo.info).toBeDefined();
expect(modelInfo.info.maxTokens).toBe(-1);
expect(modelInfo.info.contextWindow).toBe(128_000);
});
});
});

@@ -0,0 +1,209 @@
import { OpenAiNativeHandler } from '../openai-native';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
return {
__esModule: true,
default: jest.fn().mockImplementation(() => ({
chat: {
completions: {
create: mockCreate.mockImplementation(async (options) => {
if (!options.stream) {
return {
id: 'test-completion',
choices: [{
message: { role: 'assistant', content: 'Test response' },
finish_reason: 'stop',
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
return {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{
delta: { content: 'Test response' },
index: 0
}],
usage: null
};
yield {
choices: [{
delta: {},
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
};
})
}
}
}))
};
});
describe('OpenAiNativeHandler', () => {
let handler: OpenAiNativeHandler;
let mockOptions: ApiHandlerOptions;
beforeEach(() => {
mockOptions = {
apiModelId: 'gpt-4o',
openAiNativeApiKey: 'test-api-key'
};
handler = new OpenAiNativeHandler(mockOptions);
mockCreate.mockClear();
});
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeInstanceOf(OpenAiNativeHandler);
expect(handler.getModel().id).toBe(mockOptions.apiModelId);
});
it('should initialize with empty API key', () => {
const handlerWithoutKey = new OpenAiNativeHandler({
apiModelId: 'gpt-4o',
openAiNativeApiKey: ''
});
expect(handlerWithoutKey).toBeInstanceOf(OpenAiNativeHandler);
});
});
describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello!'
}
];
it('should handle streaming responses', async () => {
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(0);
const textChunks = chunks.filter(chunk => chunk.type === 'text');
expect(textChunks).toHaveLength(1);
expect(textChunks[0].text).toBe('Test response');
});
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
const stream = handler.createMessage(systemPrompt, messages);
await expect(async () => {
for await (const chunk of stream) {
// Should not reach here
}
}).rejects.toThrow('API Error');
});
});
describe('completePrompt', () => {
it('should complete prompt successfully with gpt-4o model', async () => {
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Test prompt' }],
temperature: 0
});
});
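// o1-family models do not accept a temperature parameter, so these requests are expected to omit it (unlike gpt-4o above)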
it('should complete prompt successfully with o1 model', async () => {
handler = new OpenAiNativeHandler({
apiModelId: 'o1',
openAiNativeApiKey: 'test-api-key'
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith({
model: 'o1',
messages: [{ role: 'user', content: 'Test prompt' }]
});
});
it('should complete prompt successfully with o1-preview model', async () => {
handler = new OpenAiNativeHandler({
apiModelId: 'o1-preview',
openAiNativeApiKey: 'test-api-key'
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith({
model: 'o1-preview',
messages: [{ role: 'user', content: 'Test prompt' }]
});
});
it('should complete prompt successfully with o1-mini model', async () => {
handler = new OpenAiNativeHandler({
apiModelId: 'o1-mini',
openAiNativeApiKey: 'test-api-key'
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith({
model: 'o1-mini',
messages: [{ role: 'user', content: 'Test prompt' }]
});
});
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('OpenAI Native completion error: API Error');
});
it('should handle empty response', async () => {
mockCreate.mockResolvedValueOnce({
choices: [{ message: { content: '' } }]
});
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
});
describe('getModel', () => {
it('should return model info', () => {
const modelInfo = handler.getModel();
expect(modelInfo.id).toBe(mockOptions.apiModelId);
expect(modelInfo.info).toBeDefined();
expect(modelInfo.info.maxTokens).toBe(4096);
expect(modelInfo.info.contextWindow).toBe(128_000);
});
it('should handle undefined model ID', () => {
const handlerWithoutModel = new OpenAiNativeHandler({
openAiNativeApiKey: 'test-api-key'
});
const modelInfo = handlerWithoutModel.getModel();
expect(modelInfo.id).toBe('gpt-4o'); // Default model
expect(modelInfo.info).toBeDefined();
});
});
});

@@ -1,192 +1,224 @@
import { OpenAiHandler } from '../openai'
import { ApiHandlerOptions, openAiModelInfoSaneDefaults } from '../../../shared/api'
import OpenAI, { AzureOpenAI } from 'openai'
import { Anthropic } from '@anthropic-ai/sdk'
import { OpenAiHandler } from '../openai';
import { ApiHandlerOptions } from '../../../shared/api';
import { ApiStream } from '../../transform/stream';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';
// Mock dependencies
jest.mock('openai')
// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
return {
__esModule: true,
default: jest.fn().mockImplementation(() => ({
chat: {
completions: {
create: mockCreate.mockImplementation(async (options) => {
if (!options.stream) {
return {
id: 'test-completion',
choices: [{
message: { role: 'assistant', content: 'Test response', refusal: null },
finish_reason: 'stop',
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
return {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{
delta: { content: 'Test response' },
index: 0
}],
usage: null
};
yield {
choices: [{
delta: {},
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
}
};
}
};
})
}
}
}))
};
});
describe('OpenAiHandler', () => {
const mockOptions: ApiHandlerOptions = {
openAiApiKey: 'test-key',
openAiModelId: 'gpt-4',
openAiStreamingEnabled: true,
openAiBaseUrl: 'https://api.openai.com/v1'
}
let handler: OpenAiHandler;
let mockOptions: ApiHandlerOptions;
beforeEach(() => {
jest.clearAllMocks()
})
mockOptions = {
openAiApiKey: 'test-api-key',
openAiModelId: 'gpt-4',
openAiBaseUrl: 'https://api.openai.com/v1'
};
handler = new OpenAiHandler(mockOptions);
mockCreate.mockClear();
});
test('constructor initializes with correct options', () => {
const handler = new OpenAiHandler(mockOptions)
expect(handler).toBeInstanceOf(OpenAiHandler)
expect(OpenAI).toHaveBeenCalledWith({
apiKey: mockOptions.openAiApiKey,
baseURL: mockOptions.openAiBaseUrl
})
})
describe('constructor', () => {
it('should initialize with provided options', () => {
expect(handler).toBeInstanceOf(OpenAiHandler);
expect(handler.getModel().id).toBe(mockOptions.openAiModelId);
});
test('constructor initializes Azure client when Azure URL is provided', () => {
const azureOptions: ApiHandlerOptions = {
...mockOptions,
openAiBaseUrl: 'https://example.azure.com',
azureApiVersion: '2023-05-15'
}
const handler = new OpenAiHandler(azureOptions)
expect(handler).toBeInstanceOf(OpenAiHandler)
expect(AzureOpenAI).toHaveBeenCalledWith({
baseURL: azureOptions.openAiBaseUrl,
apiKey: azureOptions.openAiApiKey,
apiVersion: azureOptions.azureApiVersion
})
})
it('should use custom base URL if provided', () => {
const customBaseUrl = 'https://custom.openai.com/v1';
const handlerWithCustomUrl = new OpenAiHandler({
...mockOptions,
openAiBaseUrl: customBaseUrl
});
expect(handlerWithCustomUrl).toBeInstanceOf(OpenAiHandler);
});
});
test('getModel returns correct model info', () => {
const handler = new OpenAiHandler(mockOptions)
const result = handler.getModel()
expect(result).toEqual({
id: mockOptions.openAiModelId,
info: openAiModelInfoSaneDefaults
})
})
test('createMessage handles streaming correctly when enabled', async () => {
const handler = new OpenAiHandler({
...mockOptions,
openAiStreamingEnabled: true,
includeMaxTokens: true
})
const mockStream = {
async *[Symbol.asyncIterator]() {
yield {
choices: [{
delta: {
content: 'test response'
}
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5
}
}
}
}
const mockCreate = jest.fn().mockResolvedValue(mockStream)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
const systemPrompt = 'test system prompt'
describe('createMessage', () => {
const systemPrompt = 'You are a helpful assistant.';
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'test message' }
]
const generator = handler.createMessage(systemPrompt, messages)
const chunks = []
for await (const chunk of generator) {
chunks.push(chunk)
}
expect(chunks).toEqual([
{
type: 'text',
text: 'test response'
},
{
type: 'usage',
inputTokens: 10,
outputTokens: 5
role: 'user',
content: [{
type: 'text' as const,
text: 'Hello!'
}]
}
])
];
expect(mockCreate).toHaveBeenCalledWith({
model: mockOptions.openAiModelId,
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: 'test message' }
],
temperature: 0,
stream: true,
stream_options: { include_usage: true },
max_tokens: openAiModelInfoSaneDefaults.maxTokens
})
})
it('should handle non-streaming mode', async () => {
const handler = new OpenAiHandler({
...mockOptions,
openAiStreamingEnabled: false
});
test('createMessage handles non-streaming correctly when disabled', async () => {
const handler = new OpenAiHandler({
...mockOptions,
openAiStreamingEnabled: false
})
const mockResponse = {
choices: [{
message: {
content: 'test response'
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(0);
const textChunk = chunks.find(chunk => chunk.type === 'text');
const usageChunk = chunks.find(chunk => chunk.type === 'usage');
expect(textChunk).toBeDefined();
expect(textChunk?.text).toBe('Test response');
expect(usageChunk).toBeDefined();
expect(usageChunk?.inputTokens).toBe(10);
expect(usageChunk?.outputTokens).toBe(5);
});
it('should handle streaming responses', async () => {
const stream = handler.createMessage(systemPrompt, messages);
const chunks: any[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(0);
const textChunks = chunks.filter(chunk => chunk.type === 'text');
expect(textChunks).toHaveLength(1);
expect(textChunks[0].text).toBe('Test response');
});
});
describe('error handling', () => {
const testMessages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: [{
type: 'text' as const,
text: 'Hello'
}]
}
];
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
const stream = handler.createMessage('system prompt', testMessages);
await expect(async () => {
for await (const chunk of stream) {
// Should not reach here
}
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5
}
}
}).rejects.toThrow('API Error');
});
const mockCreate = jest.fn().mockResolvedValue(mockResponse)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
it('should handle rate limiting', async () => {
const rateLimitError = new Error('Rate limit exceeded');
rateLimitError.name = 'Error';
(rateLimitError as any).status = 429;
mockCreate.mockRejectedValueOnce(rateLimitError);
const systemPrompt = 'test system prompt'
const messages: Anthropic.Messages.MessageParam[] = [
{ role: 'user', content: 'test message' }
]
const stream = handler.createMessage('system prompt', testMessages);
const generator = handler.createMessage(systemPrompt, messages)
const chunks = []
for await (const chunk of generator) {
chunks.push(chunk)
}
await expect(async () => {
for await (const chunk of stream) {
// Should not reach here
}
}).rejects.toThrow('Rate limit exceeded');
});
});
expect(chunks).toEqual([
{
type: 'text',
text: 'test response'
},
{
type: 'usage',
inputTokens: 10,
outputTokens: 5
}
])
describe('completePrompt', () => {
it('should complete prompt successfully', async () => {
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(mockCreate).toHaveBeenCalledWith({
model: mockOptions.openAiModelId,
messages: [{ role: 'user', content: 'Test prompt' }],
temperature: 0
});
});
expect(mockCreate).toHaveBeenCalledWith({
model: mockOptions.openAiModelId,
messages: [
{ role: 'user', content: systemPrompt },
{ role: 'user', content: 'test message' }
]
})
})
it('should handle API errors', async () => {
mockCreate.mockRejectedValueOnce(new Error('API Error'));
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('OpenAI completion error: API Error');
});
test('createMessage handles API errors', async () => {
const handler = new OpenAiHandler(mockOptions)
const mockStream = {
async *[Symbol.asyncIterator]() {
throw new Error('API Error')
}
}
it('should handle empty response', async () => {
mockCreate.mockImplementationOnce(() => ({
choices: [{ message: { content: '' } }]
}));
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
});
const mockCreate = jest.fn().mockResolvedValue(mockStream)
;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
completions: { create: mockCreate }
} as any
describe('getModel', () => {
it('should return model info for a valid model ID', () => {
const model = handler.getModel();
expect(model.id).toBe(mockOptions.apiModelId);
expect(model.info).toBeDefined();
expect(model.info.contextWindow).toBe(200_000);
expect(model.info.supportsImages).toBe(true);
});
it('should handle undefined model ID', () => {
const handlerWithoutModel = new AnthropicHandler({
...mockOptions,
apiModelId: undefined
});
const model = handlerWithoutModel.getModel();
expect(model.id).toBeDefined();
expect(model.info).toBeDefined();
});
});
});

View File

@@ -0,0 +1,296 @@
import { VertexHandler } from '../vertex';
import { Anthropic } from '@anthropic-ai/sdk';
import { AnthropicVertex } from '@anthropic-ai/vertex-sdk';
// Mock Vertex SDK
jest.mock('@anthropic-ai/vertex-sdk', () => ({
AnthropicVertex: jest.fn().mockImplementation(() => ({
messages: {
create: jest.fn().mockImplementation(async (options) => {
if (!options.stream) {
return {
id: 'test-completion',
content: [
{ type: 'text', text: 'Test response' }
],
role: 'assistant',
model: options.model,
usage: {
input_tokens: 10,
output_tokens: 5
}
}
}
return {
async *[Symbol.asyncIterator]() {
yield {
type: 'message_start',
message: {
usage: {
input_tokens: 10,
output_tokens: 5
}
}
}
yield {
type: 'content_block_start',
content_block: {
type: 'text',
text: 'Test response'
}
}
}
}
})
}
}))
}));
describe('VertexHandler', () => {
let handler: VertexHandler;
beforeEach(() => {
handler = new VertexHandler({
apiModelId: 'claude-3-5-sonnet-v2@20241022',
vertexProjectId: 'test-project',
vertexRegion: 'us-central1'
});
});
describe('constructor', () => {
it('should initialize with provided config', () => {
expect(AnthropicVertex).toHaveBeenCalledWith({
projectId: 'test-project',
region: 'us-central1'
});
});
});
describe('createMessage', () => {
const mockMessages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello'
},
{
role: 'assistant',
content: 'Hi there!'
}
];
const systemPrompt = 'You are a helpful assistant';
it('should handle streaming responses correctly', async () => {
const mockStream = [
{
type: 'message_start',
message: {
usage: {
input_tokens: 10,
output_tokens: 0
}
}
},
{
type: 'content_block_start',
index: 0,
content_block: {
type: 'text',
text: 'Hello'
}
},
{
type: 'content_block_delta',
delta: {
type: 'text_delta',
text: ' world!'
}
},
{
type: 'message_delta',
usage: {
output_tokens: 5
}
}
];
// Setup async iterator for mock stream
const asyncIterator = {
async *[Symbol.asyncIterator]() {
for (const chunk of mockStream) {
yield chunk;
}
}
};
const mockCreate = jest.fn().mockResolvedValue(asyncIterator);
(handler['client'].messages as any).create = mockCreate;
const stream = handler.createMessage(systemPrompt, mockMessages);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBe(4);
expect(chunks[0]).toEqual({
type: 'usage',
inputTokens: 10,
outputTokens: 0
});
expect(chunks[1]).toEqual({
type: 'text',
text: 'Hello'
});
expect(chunks[2]).toEqual({
type: 'text',
text: ' world!'
});
expect(chunks[3]).toEqual({
type: 'usage',
inputTokens: 0,
outputTokens: 5
});
expect(mockCreate).toHaveBeenCalledWith({
model: 'claude-3-5-sonnet-v2@20241022',
max_tokens: 8192,
temperature: 0,
system: systemPrompt,
messages: mockMessages,
stream: true
});
});
it('should handle multiple content blocks with line breaks', async () => {
const mockStream = [
{
type: 'content_block_start',
index: 0,
content_block: {
type: 'text',
text: 'First line'
}
},
{
type: 'content_block_start',
index: 1,
content_block: {
type: 'text',
text: 'Second line'
}
}
];
const asyncIterator = {
async *[Symbol.asyncIterator]() {
for (const chunk of mockStream) {
yield chunk;
}
}
};
const mockCreate = jest.fn().mockResolvedValue(asyncIterator);
(handler['client'].messages as any).create = mockCreate;
const stream = handler.createMessage(systemPrompt, mockMessages);
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBe(3);
expect(chunks[0]).toEqual({
type: 'text',
text: 'First line'
});
expect(chunks[1]).toEqual({
type: 'text',
text: '\n'
});
expect(chunks[2]).toEqual({
type: 'text',
text: 'Second line'
});
});
it('should handle API errors', async () => {
const mockError = new Error('Vertex API error');
const mockCreate = jest.fn().mockRejectedValue(mockError);
(handler['client'].messages as any).create = mockCreate;
const stream = handler.createMessage(systemPrompt, mockMessages);
await expect(async () => {
for await (const chunk of stream) {
// Should throw before yielding any chunks
}
}).rejects.toThrow('Vertex API error');
});
});
describe('completePrompt', () => {
it('should complete prompt successfully', async () => {
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('Test response');
expect(handler['client'].messages.create).toHaveBeenCalledWith({
model: 'claude-3-5-sonnet-v2@20241022',
max_tokens: 8192,
temperature: 0,
messages: [{ role: 'user', content: 'Test prompt' }],
stream: false
});
});
it('should handle API errors', async () => {
const mockError = new Error('Vertex API error');
const mockCreate = jest.fn().mockRejectedValue(mockError);
(handler['client'].messages as any).create = mockCreate;
await expect(handler.completePrompt('Test prompt'))
.rejects.toThrow('Vertex completion error: Vertex API error');
});
it('should handle non-text content', async () => {
const mockCreate = jest.fn().mockResolvedValue({
content: [{ type: 'image' }]
});
(handler['client'].messages as any).create = mockCreate;
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
it('should handle empty response', async () => {
const mockCreate = jest.fn().mockResolvedValue({
content: [{ type: 'text', text: '' }]
});
(handler['client'].messages as any).create = mockCreate;
const result = await handler.completePrompt('Test prompt');
expect(result).toBe('');
});
});
describe('getModel', () => {
it('should return correct model info', () => {
const modelInfo = handler.getModel();
expect(modelInfo.id).toBe('claude-3-5-sonnet-v2@20241022');
expect(modelInfo.info).toBeDefined();
expect(modelInfo.info.maxTokens).toBe(8192);
expect(modelInfo.info.contextWindow).toBe(200_000);
});
it('should return default model if invalid model specified', () => {
const invalidHandler = new VertexHandler({
apiModelId: 'invalid-model',
vertexProjectId: 'test-project',
vertexRegion: 'us-central1'
});
const modelInfo = invalidHandler.getModel();
expect(modelInfo.id).toBe('claude-3-5-sonnet-v2@20241022'); // Default model
});
});
});

View File

@@ -7,10 +7,10 @@ import {
ApiHandlerOptions,
ModelInfo,
} from "../../shared/api"
import { ApiHandler } from "../index"
import { ApiHandler, SingleCompletionHandler } from "../index"
import { ApiStream } from "../transform/stream"
export class AnthropicHandler implements ApiHandler {
export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions
private client: Anthropic
@@ -173,4 +173,27 @@ export class AnthropicHandler implements ApiHandler {
}
return { id: anthropicDefaultModelId, info: anthropicModels[anthropicDefaultModelId] }
}
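// Single, non-streaming completion for the SingleCompletionHandler interface; returns the first text block or an empty string.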
async completePrompt(prompt: string): Promise<string> {
try {
const response = await this.client.messages.create({
model: this.getModel().id,
max_tokens: this.getModel().info.maxTokens || 8192,
temperature: 0,
messages: [{ role: "user", content: prompt }],
stream: false
})
const content = response.content[0]
if (content.type === 'text') {
return content.text
}
return ''
} catch (error) {
if (error instanceof Error) {
throw new Error(`Anthropic completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -1,6 +1,6 @@
import { BedrockRuntimeClient, ConverseStreamCommand, BedrockRuntimeClientConfig } from "@aws-sdk/client-bedrock-runtime"
import { BedrockRuntimeClient, ConverseStreamCommand, ConverseCommand, BedrockRuntimeClientConfig } from "@aws-sdk/client-bedrock-runtime"
import { Anthropic } from "@anthropic-ai/sdk"
import { ApiHandler } from "../"
import { ApiHandler, SingleCompletionHandler } from "../"
import { ApiHandlerOptions, BedrockModelId, ModelInfo, bedrockDefaultModelId, bedrockModels } from "../../shared/api"
import { ApiStream } from "../transform/stream"
import { convertToBedrockConverseMessages, convertToAnthropicMessage } from "../transform/bedrock-converse-format"
@@ -38,7 +38,7 @@ export interface StreamEvent {
};
}
export class AwsBedrockHandler implements ApiHandler {
export class AwsBedrockHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions
private client: BedrockRuntimeClient
@@ -199,7 +199,7 @@ export class AwsBedrockHandler implements ApiHandler {
if (modelId) {
// For tests, allow any model ID
if (process.env.NODE_ENV === 'test') {
return {
return {
id: modelId,
info: {
maxTokens: 5000,
@@ -214,9 +214,68 @@ export class AwsBedrockHandler implements ApiHandler {
return { id, info: bedrockModels[id] }
}
}
return {
id: bedrockDefaultModelId,
info: bedrockModels[bedrockDefaultModelId]
return {
id: bedrockDefaultModelId,
info: bedrockModels[bedrockDefaultModelId]
}
}
async completePrompt(prompt: string): Promise<string> {
try {
const modelConfig = this.getModel()
// Handle cross-region inference
let modelId: string
if (this.options.awsUseCrossRegionInference) {
let regionPrefix = (this.options.awsRegion || "").slice(0, 3)
switch (regionPrefix) {
case "us-":
modelId = `us.${modelConfig.id}`
break
case "eu-":
modelId = `eu.${modelConfig.id}`
break
default:
modelId = modelConfig.id
break
}
} else {
modelId = modelConfig.id
}
const payload = {
modelId,
messages: convertToBedrockConverseMessages([{
role: "user",
content: prompt
}]),
inferenceConfig: {
maxTokens: modelConfig.info.maxTokens || 5000,
temperature: 0.3,
topP: 0.1
}
}
const command = new ConverseCommand(payload)
const response = await this.client.send(command)
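// The Converse output may arrive as raw bytes; decode and parse it to pull out the text content, otherwise fall back to an empty string.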
if (response.output && response.output instanceof Uint8Array) {
try {
const outputStr = new TextDecoder().decode(response.output)
const output = JSON.parse(outputStr)
if (output.content) {
return output.content
}
} catch (parseError) {
console.error('Failed to parse Bedrock response:', parseError)
}
}
return ''
} catch (error) {
if (error instanceof Error) {
throw new Error(`Bedrock completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -1,11 +1,11 @@
import { Anthropic } from "@anthropic-ai/sdk"
import { GoogleGenerativeAI } from "@google/generative-ai"
import { ApiHandler } from "../"
import { ApiHandler, SingleCompletionHandler } from "../"
import { ApiHandlerOptions, geminiDefaultModelId, GeminiModelId, geminiModels, ModelInfo } from "../../shared/api"
import { convertAnthropicMessageToGemini } from "../transform/gemini-format"
import { ApiStream } from "../transform/stream"
export class GeminiHandler implements ApiHandler {
export class GeminiHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions
private client: GoogleGenerativeAI
@@ -53,4 +53,26 @@ export class GeminiHandler implements ApiHandler {
}
return { id: geminiDefaultModelId, info: geminiModels[geminiDefaultModelId] }
}
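// Single-turn, non-streaming generation; temperature 0 keeps completions deterministic.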
async completePrompt(prompt: string): Promise<string> {
try {
const model = this.client.getGenerativeModel({
model: this.getModel().id,
})
const result = await model.generateContent({
contents: [{ role: "user", parts: [{ text: prompt }] }],
generationConfig: {
temperature: 0,
},
})
return result.response.text()
} catch (error) {
if (error instanceof Error) {
throw new Error(`Gemini completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -1,13 +1,13 @@
import { Anthropic } from "@anthropic-ai/sdk"
import axios from "axios"
import OpenAI from "openai"
import { ApiHandler } from "../"
import { ApiHandler, SingleCompletionHandler } from "../"
import { ApiHandlerOptions, ModelInfo, glamaDefaultModelId, glamaDefaultModelInfo } from "../../shared/api"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
import delay from "delay"
export class GlamaHandler implements ApiHandler {
export class GlamaHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions
private client: OpenAI
@@ -129,4 +129,26 @@ export class GlamaHandler implements ApiHandler {
return { id: glamaDefaultModelId, info: glamaDefaultModelInfo }
}
async completePrompt(prompt: string): Promise<string> {
try {
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
model: this.getModel().id,
messages: [{ role: "user", content: prompt }],
temperature: 0,
}
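// Anthropic-backed models routed through Glama get an explicit max_tokens cap.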
if (this.getModel().id.startsWith("anthropic/")) {
requestOptions.max_tokens = 8192
}
const response = await this.client.chat.completions.create(requestOptions)
return response.choices[0]?.message.content || ""
} catch (error) {
if (error instanceof Error) {
throw new Error(`Glama completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -1,11 +1,11 @@
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
import { ApiHandler } from "../"
import { ApiHandler, SingleCompletionHandler } from "../"
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
export class LmStudioHandler implements ApiHandler {
export class LmStudioHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions
private client: OpenAI
@@ -53,4 +53,20 @@ export class LmStudioHandler implements ApiHandler {
info: openAiModelInfoSaneDefaults,
}
}
async completePrompt(prompt: string): Promise<string> {
try {
const response = await this.client.chat.completions.create({
model: this.getModel().id,
messages: [{ role: "user", content: prompt }],
temperature: 0,
stream: false
})
return response.choices[0]?.message.content || ""
} catch (error) {
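// Replace the raw error with a troubleshooting hint pointing at LM Studio's developer logs.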
throw new Error(
"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Cline's prompts.",
)
}
}
}

View File

@@ -1,11 +1,11 @@
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
import { ApiHandler } from "../"
import { ApiHandler, SingleCompletionHandler } from "../"
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
export class OllamaHandler implements ApiHandler {
export class OllamaHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions
private client: OpenAI
@@ -46,4 +46,21 @@ export class OllamaHandler implements ApiHandler {
info: openAiModelInfoSaneDefaults,
}
}
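// Non-streaming single completion via the OpenAI-compatible Ollama endpoint.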
async completePrompt(prompt: string): Promise<string> {
try {
const response = await this.client.chat.completions.create({
model: this.getModel().id,
messages: [{ role: "user", content: prompt }],
temperature: 0,
stream: false
})
return response.choices[0]?.message.content || ""
} catch (error) {
if (error instanceof Error) {
throw new Error(`Ollama completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -1,6 +1,6 @@
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
import { ApiHandler } from "../"
import { ApiHandler, SingleCompletionHandler } from "../"
import {
ApiHandlerOptions,
ModelInfo,
@@ -11,7 +11,7 @@ import {
import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
export class OpenAiNativeHandler implements ApiHandler {
export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions
private client: OpenAI
@@ -83,4 +83,37 @@ export class OpenAiNativeHandler implements ApiHandler {
}
return { id: openAiNativeDefaultModelId, info: openAiNativeModels[openAiNativeDefaultModelId] }
}
async completePrompt(prompt: string): Promise<string> {
try {
const modelId = this.getModel().id
let requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
switch (modelId) {
case "o1":
case "o1-preview":
case "o1-mini":
// o1 doesn't support non-1 temp or system prompt
requestOptions = {
model: modelId,
messages: [{ role: "user", content: prompt }]
}
break
default:
requestOptions = {
model: modelId,
messages: [{ role: "user", content: prompt }],
temperature: 0
}
}
const response = await this.client.chat.completions.create(requestOptions)
return response.choices[0]?.message.content || ""
} catch (error) {
if (error instanceof Error) {
throw new Error(`OpenAI Native completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -6,11 +6,11 @@ import {
ModelInfo,
openAiModelInfoSaneDefaults,
} from "../../shared/api"
import { ApiHandler } from "../index"
import { ApiHandler, SingleCompletionHandler } from "../index"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
export class OpenAiHandler implements ApiHandler {
export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
protected options: ApiHandlerOptions
private client: OpenAI
@@ -100,4 +100,22 @@ export class OpenAiHandler implements ApiHandler {
info: openAiModelInfoSaneDefaults,
}
}
async completePrompt(prompt: string): Promise<string> {
try {
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
model: this.getModel().id,
messages: [{ role: "user", content: prompt }],
temperature: 0,
}
const response = await this.client.chat.completions.create(requestOptions)
return response.choices[0]?.message.content || ""
} catch (error) {
if (error instanceof Error) {
throw new Error(`OpenAI completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -1,11 +1,11 @@
import { Anthropic } from "@anthropic-ai/sdk"
import { AnthropicVertex } from "@anthropic-ai/vertex-sdk"
import { ApiHandler } from "../"
import { ApiHandler, SingleCompletionHandler } from "../"
import { ApiHandlerOptions, ModelInfo, vertexDefaultModelId, VertexModelId, vertexModels } from "../../shared/api"
import { ApiStream } from "../transform/stream"
// https://docs.anthropic.com/en/api/claude-on-vertex-ai
export class VertexHandler implements ApiHandler {
export class VertexHandler implements ApiHandler, SingleCompletionHandler {
private options: ApiHandlerOptions
private client: AnthropicVertex
@@ -83,4 +83,27 @@ export class VertexHandler implements ApiHandler {
}
return { id: vertexDefaultModelId, info: vertexModels[vertexDefaultModelId] }
}
async completePrompt(prompt: string): Promise<string> {
try {
const response = await this.client.messages.create({
model: this.getModel().id,
max_tokens: this.getModel().info.maxTokens || 8192,
temperature: 0,
messages: [{ role: "user", content: prompt }],
stream: false
})
const content = response.content[0]
if (content.type === 'text') {
return content.text
}
return ''
} catch (error) {
if (error instanceof Error) {
throw new Error(`Vertex completion error: ${error.message}`)
}
throw error
}
}
}

View File

@@ -0,0 +1,257 @@
import { convertToOpenAiMessages, convertToAnthropicMessage } from '../openai-format';
import { Anthropic } from '@anthropic-ai/sdk';
import OpenAI from 'openai';
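// Test helper: relaxes the SDK's Choice type so fixtures only need message, finish_reason, and index.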
type PartialChatCompletion = Omit<OpenAI.Chat.Completions.ChatCompletion, 'choices'> & {
choices: Array<Partial<OpenAI.Chat.Completions.ChatCompletion.Choice> & {
message: OpenAI.Chat.Completions.ChatCompletion.Choice['message'];
finish_reason: string;
index: number;
}>;
};
describe('OpenAI Format Transformations', () => {
describe('convertToOpenAiMessages', () => {
it('should convert simple text messages', () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: 'Hello'
},
{
role: 'assistant',
content: 'Hi there!'
}
];
const openAiMessages = convertToOpenAiMessages(anthropicMessages);
expect(openAiMessages).toHaveLength(2);
expect(openAiMessages[0]).toEqual({
role: 'user',
content: 'Hello'
});
expect(openAiMessages[1]).toEqual({
role: 'assistant',
content: 'Hi there!'
});
});
it('should handle messages with image content', () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: [
{
type: 'text',
text: 'What is in this image?'
},
{
type: 'image',
source: {
type: 'base64',
media_type: 'image/jpeg',
data: 'base64data'
}
}
]
}
];
const openAiMessages = convertToOpenAiMessages(anthropicMessages);
expect(openAiMessages).toHaveLength(1);
expect(openAiMessages[0].role).toBe('user');
const content = openAiMessages[0].content as Array<{
type: string;
text?: string;
image_url?: { url: string };
}>;
expect(Array.isArray(content)).toBe(true);
expect(content).toHaveLength(2);
expect(content[0]).toEqual({ type: 'text', text: 'What is in this image?' });
expect(content[1]).toEqual({
type: 'image_url',
image_url: { url: 'data:image/jpeg;base64,base64data' }
});
});
it('should handle assistant messages with tool use', () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: 'assistant',
content: [
{
type: 'text',
text: 'Let me check the weather.'
},
{
type: 'tool_use',
id: 'weather-123',
name: 'get_weather',
input: { city: 'London' }
}
]
}
];
const openAiMessages = convertToOpenAiMessages(anthropicMessages);
expect(openAiMessages).toHaveLength(1);
const assistantMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionAssistantMessageParam;
expect(assistantMessage.role).toBe('assistant');
expect(assistantMessage.content).toBe('Let me check the weather.');
expect(assistantMessage.tool_calls).toHaveLength(1);
expect(assistantMessage.tool_calls![0]).toEqual({
id: 'weather-123',
type: 'function',
function: {
name: 'get_weather',
arguments: JSON.stringify({ city: 'London' })
}
});
});
it('should handle user messages with tool results', () => {
const anthropicMessages: Anthropic.Messages.MessageParam[] = [
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'weather-123',
content: 'Current temperature in London: 20°C'
}
]
}
];
const openAiMessages = convertToOpenAiMessages(anthropicMessages);
expect(openAiMessages).toHaveLength(1);
const toolMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam;
expect(toolMessage.role).toBe('tool');
expect(toolMessage.tool_call_id).toBe('weather-123');
expect(toolMessage.content).toBe('Current temperature in London: 20°C');
});
});
describe('convertToAnthropicMessage', () => {
it('should convert simple completion', () => {
const openAiCompletion: PartialChatCompletion = {
id: 'completion-123',
model: 'gpt-4',
choices: [{
message: {
role: 'assistant',
content: 'Hello there!',
refusal: null
},
finish_reason: 'stop',
index: 0
}],
usage: {
prompt_tokens: 10,
completion_tokens: 5,
total_tokens: 15
},
created: 123456789,
object: 'chat.completion'
};
const anthropicMessage = convertToAnthropicMessage(openAiCompletion as OpenAI.Chat.Completions.ChatCompletion);
expect(anthropicMessage.id).toBe('completion-123');
expect(anthropicMessage.role).toBe('assistant');
expect(anthropicMessage.content).toHaveLength(1);
expect(anthropicMessage.content[0]).toEqual({
type: 'text',
text: 'Hello there!'
});
expect(anthropicMessage.stop_reason).toBe('end_turn');
expect(anthropicMessage.usage).toEqual({
input_tokens: 10,
output_tokens: 5
});
});
it('should handle tool calls in completion', () => {
const openAiCompletion: PartialChatCompletion = {
id: 'completion-123',
model: 'gpt-4',
choices: [{
message: {
role: 'assistant',
content: 'Let me check the weather.',
tool_calls: [{
id: 'weather-123',
type: 'function',
function: {
name: 'get_weather',
arguments: '{"city":"London"}'
}
}],
refusal: null
},
finish_reason: 'tool_calls',
index: 0
}],
usage: {
prompt_tokens: 15,
completion_tokens: 8,
total_tokens: 23
},
created: 123456789,
object: 'chat.completion'
};
const anthropicMessage = convertToAnthropicMessage(openAiCompletion as OpenAI.Chat.Completions.ChatCompletion);
expect(anthropicMessage.content).toHaveLength(2);
expect(anthropicMessage.content[0]).toEqual({
type: 'text',
text: 'Let me check the weather.'
});
expect(anthropicMessage.content[1]).toEqual({
type: 'tool_use',
id: 'weather-123',
name: 'get_weather',
input: { city: 'London' }
});
expect(anthropicMessage.stop_reason).toBe('tool_use');
});
it('should handle invalid tool call arguments', () => {
const openAiCompletion: PartialChatCompletion = {
id: 'completion-123',
model: 'gpt-4',
choices: [{
message: {
role: 'assistant',
content: 'Testing invalid arguments',
tool_calls: [{
id: 'test-123',
type: 'function',
function: {
name: 'test_function',
arguments: 'invalid json'
}
}],
refusal: null
},
finish_reason: 'tool_calls',
index: 0
}],
created: 123456789,
object: 'chat.completion'
};
const anthropicMessage = convertToAnthropicMessage(openAiCompletion as OpenAI.Chat.Completions.ChatCompletion);
expect(anthropicMessage.content).toHaveLength(2);
expect(anthropicMessage.content[1]).toEqual({
type: 'tool_use',
id: 'test-123',
name: 'test_function',
input: {} // Should default to empty object for invalid JSON
});
});
});
});

View File

@@ -0,0 +1,114 @@
import { ApiStreamChunk } from '../stream';
describe('API Stream Types', () => {
describe('ApiStreamChunk', () => {
it('should correctly handle text chunks', () => {
const textChunk: ApiStreamChunk = {
type: 'text',
text: 'Hello world'
};
expect(textChunk.type).toBe('text');
expect(textChunk.text).toBe('Hello world');
});
it('should correctly handle usage chunks with cache information', () => {
const usageChunk: ApiStreamChunk = {
type: 'usage',
inputTokens: 100,
outputTokens: 50,
cacheWriteTokens: 20,
cacheReadTokens: 10
};
expect(usageChunk.type).toBe('usage');
expect(usageChunk.inputTokens).toBe(100);
expect(usageChunk.outputTokens).toBe(50);
expect(usageChunk.cacheWriteTokens).toBe(20);
expect(usageChunk.cacheReadTokens).toBe(10);
});
it('should handle usage chunks without cache tokens', () => {
const usageChunk: ApiStreamChunk = {
type: 'usage',
inputTokens: 100,
outputTokens: 50
};
expect(usageChunk.type).toBe('usage');
expect(usageChunk.inputTokens).toBe(100);
expect(usageChunk.outputTokens).toBe(50);
expect(usageChunk.cacheWriteTokens).toBeUndefined();
expect(usageChunk.cacheReadTokens).toBeUndefined();
});
it('should handle text chunks with empty strings', () => {
const emptyTextChunk: ApiStreamChunk = {
type: 'text',
text: ''
};
expect(emptyTextChunk.type).toBe('text');
expect(emptyTextChunk.text).toBe('');
});
it('should handle usage chunks with zero tokens', () => {
const zeroUsageChunk: ApiStreamChunk = {
type: 'usage',
inputTokens: 0,
outputTokens: 0
};
expect(zeroUsageChunk.type).toBe('usage');
expect(zeroUsageChunk.inputTokens).toBe(0);
expect(zeroUsageChunk.outputTokens).toBe(0);
});
it('should handle usage chunks with large token counts', () => {
const largeUsageChunk: ApiStreamChunk = {
type: 'usage',
inputTokens: 1000000,
outputTokens: 500000,
cacheWriteTokens: 200000,
cacheReadTokens: 100000
};
expect(largeUsageChunk.type).toBe('usage');
expect(largeUsageChunk.inputTokens).toBe(1000000);
expect(largeUsageChunk.outputTokens).toBe(500000);
expect(largeUsageChunk.cacheWriteTokens).toBe(200000);
expect(largeUsageChunk.cacheReadTokens).toBe(100000);
});
it('should handle text chunks with special characters', () => {
const specialCharsChunk: ApiStreamChunk = {
type: 'text',
text: '!@#$%^&*()_+-=[]{}|;:,.<>?`~'
};
expect(specialCharsChunk.type).toBe('text');
expect(specialCharsChunk.text).toBe('!@#$%^&*()_+-=[]{}|;:,.<>?`~');
});
it('should handle text chunks with unicode characters', () => {
const unicodeChunk: ApiStreamChunk = {
type: 'text',
text: '你好世界👋🌍'
};
expect(unicodeChunk.type).toBe('text');
expect(unicodeChunk.text).toBe('你好世界👋🌍');
});
it('should handle text chunks with multiline content', () => {
const multilineChunk: ApiStreamChunk = {
type: 'text',
text: 'Line 1\nLine 2\nLine 3'
};
expect(multilineChunk.type).toBe('text');
expect(multilineChunk.text).toBe('Line 1\nLine 2\nLine 3');
expect(multilineChunk.text.split('\n')).toHaveLength(3);
});
});
});