From 5e8d804d2bf4206ce5fb166fc91028b9af5a7091 Mon Sep 17 00:00:00 2001
From: Matt Rubens
Date: Sun, 5 Jan 2025 20:52:06 -0500
Subject: [PATCH] Add test

---
 src/api/providers/__tests__/openai.test.ts | 194 +++++++++++++++++++++
 1 file changed, 194 insertions(+)
 create mode 100644 src/api/providers/__tests__/openai.test.ts

diff --git a/src/api/providers/__tests__/openai.test.ts b/src/api/providers/__tests__/openai.test.ts
new file mode 100644
index 0000000..0a88068
--- /dev/null
+++ b/src/api/providers/__tests__/openai.test.ts
@@ -0,0 +1,194 @@
+import { OpenAiHandler } from '../openai'
+import { ApiHandlerOptions, openAiModelInfoSaneDefaults } from '../../../shared/api'
+import OpenAI, { AzureOpenAI } from 'openai'
+import { Anthropic } from '@anthropic-ai/sdk'
+
+// Mock dependencies
+jest.mock('openai')
+
+describe('OpenAiHandler', () => {
+	const mockOptions: ApiHandlerOptions = {
+		openAiApiKey: 'test-key',
+		openAiModelId: 'gpt-4',
+		openAiStreamingEnabled: true,
+		openAiBaseUrl: 'https://api.openai.com/v1'
+	}
+
+	beforeEach(() => {
+		jest.clearAllMocks()
+	})
+
+	test('constructor initializes with correct options', () => {
+		const handler = new OpenAiHandler(mockOptions)
+		expect(handler).toBeInstanceOf(OpenAiHandler)
+		expect(OpenAI).toHaveBeenCalledWith({
+			apiKey: mockOptions.openAiApiKey,
+			baseURL: mockOptions.openAiBaseUrl
+		})
+	})
+
+	test('constructor initializes Azure client when Azure URL is provided', () => {
+		const azureOptions: ApiHandlerOptions = {
+			...mockOptions,
+			openAiBaseUrl: 'https://example.azure.com',
+			azureApiVersion: '2023-05-15'
+		}
+		const handler = new OpenAiHandler(azureOptions)
+		expect(handler).toBeInstanceOf(OpenAiHandler)
+		expect(AzureOpenAI).toHaveBeenCalledWith({
+			baseURL: azureOptions.openAiBaseUrl,
+			apiKey: azureOptions.openAiApiKey,
+			apiVersion: azureOptions.azureApiVersion
+		})
+	})
+
+	test('getModel returns correct model info', () => {
+		const handler = new OpenAiHandler(mockOptions)
+		const result = handler.getModel()
+
+		expect(result).toEqual({
+			id: mockOptions.openAiModelId,
+			info: openAiModelInfoSaneDefaults
+		})
+	})
+
+	test('createMessage handles streaming correctly when enabled', async () => {
+		const handler = new OpenAiHandler({
+			...mockOptions,
+			openAiStreamingEnabled: true,
+			includeMaxTokens: true
+		})
+
+		const mockStream = {
+			async *[Symbol.asyncIterator]() {
+				yield {
+					choices: [{
+						delta: {
+							content: 'test response'
+						}
+					}],
+					usage: {
+						prompt_tokens: 10,
+						completion_tokens: 5
+					}
+				}
+			}
+		}
+
+		const mockCreate = jest.fn().mockResolvedValue(mockStream)
+		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
+			completions: { create: mockCreate }
+		} as any
+
+		const systemPrompt = 'test system prompt'
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{ role: 'user', content: 'test message' }
+		]
+
+		const generator = handler.createMessage(systemPrompt, messages)
+		const chunks = []
+
+		for await (const chunk of generator) {
+			chunks.push(chunk)
+		}
+
+		expect(chunks).toEqual([
+			{
+				type: 'text',
+				text: 'test response'
+			},
+			{
+				type: 'usage',
+				inputTokens: 10,
+				outputTokens: 5
+			}
+		])
+
+		expect(mockCreate).toHaveBeenCalledWith({
+			model: mockOptions.openAiModelId,
+			messages: [
+				{ role: 'system', content: systemPrompt },
+				{ role: 'user', content: 'test message' }
+			],
+			temperature: 0,
+			stream: true,
+			stream_options: { include_usage: true },
+			max_tokens: openAiModelInfoSaneDefaults.maxTokens
+		})
+	})
+
+	test('createMessage handles non-streaming correctly when disabled', async () => {
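+		// With streaming disabled the handler should issue a single
+		// non-streaming request and still emit text and usage chunks.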
+		const handler = new OpenAiHandler({
+			...mockOptions,
+			openAiStreamingEnabled: false
+		})
+
+		const mockResponse = {
+			choices: [{
+				message: {
+					content: 'test response'
+				}
+			}],
+			usage: {
+				prompt_tokens: 10,
+				completion_tokens: 5
+			}
+		}
+
+		const mockCreate = jest.fn().mockResolvedValue(mockResponse)
+		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
+			completions: { create: mockCreate }
+		} as any
+
+		const systemPrompt = 'test system prompt'
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{ role: 'user', content: 'test message' }
+		]
+
+		const generator = handler.createMessage(systemPrompt, messages)
+		const chunks = []
+
+		for await (const chunk of generator) {
+			chunks.push(chunk)
+		}
+
+		expect(chunks).toEqual([
+			{
+				type: 'text',
+				text: 'test response'
+			},
+			{
+				type: 'usage',
+				inputTokens: 10,
+				outputTokens: 5
+			}
+		])
+
+		expect(mockCreate).toHaveBeenCalledWith({
+			model: mockOptions.openAiModelId,
+			messages: [
+				{ role: 'user', content: systemPrompt },
+				{ role: 'user', content: 'test message' }
+			]
+		})
+	})
+
+	test('createMessage handles API errors', async () => {
+		const handler = new OpenAiHandler(mockOptions)
+		const mockStream = {
+			async *[Symbol.asyncIterator]() {
+				throw new Error('API Error')
+			}
+		}
+
+		const mockCreate = jest.fn().mockResolvedValue(mockStream)
+		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
+			completions: { create: mockCreate }
+		} as any
+
+		const generator = handler.createMessage('test', [])
+		await expect(generator.next()).rejects.toThrow('API Error')
+	})
+})
\ No newline at end of file