import { LmStudioHandler } from '../lmstudio';
import { ApiHandlerOptions } from '../../../shared/api';
import OpenAI from 'openai';
import { Anthropic } from '@anthropic-ai/sdk';

// Mock OpenAI client
const mockCreate = jest.fn();
jest.mock('openai', () => {
    return {
        __esModule: true,
        default: jest.fn().mockImplementation(() => ({
            chat: {
                completions: {
                    create: mockCreate.mockImplementation(async (options) => {
                        if (!options.stream) {
                            return {
                                id: 'test-completion',
                                choices: [{
                                    message: { role: 'assistant', content: 'Test response' },
                                    finish_reason: 'stop',
                                    index: 0
                                }],
                                usage: {
                                    prompt_tokens: 10,
                                    completion_tokens: 5,
                                    total_tokens: 15
                                }
                            };
                        }
                        return {
                            [Symbol.asyncIterator]: async function* () {
                                yield {
                                    choices: [{
                                        delta: { content: 'Test response' },
                                        index: 0
                                    }],
                                    usage: null
                                };
                                yield {
                                    choices: [{
                                        delta: {},
                                        index: 0
                                    }],
                                    usage: {
                                        prompt_tokens: 10,
                                        completion_tokens: 5,
                                        total_tokens: 15
                                    }
                                };
                            }
                        };
                    })
                }
            }
        }))
    };
});

describe('LmStudioHandler', () => {
    let handler: LmStudioHandler;
    let mockOptions: ApiHandlerOptions;

    beforeEach(() => {
        mockOptions = {
            apiModelId: 'local-model',
            lmStudioModelId: 'local-model',
            lmStudioBaseUrl: 'http://localhost:1234/v1'
        };
        handler = new LmStudioHandler(mockOptions);
        mockCreate.mockClear();
    });

    describe('constructor', () => {
        it('should initialize with provided options', () => {
            expect(handler).toBeInstanceOf(LmStudioHandler);
            expect(handler.getModel().id).toBe(mockOptions.lmStudioModelId);
        });

        it('should use default base URL if not provided', () => {
            const handlerWithoutUrl = new LmStudioHandler({
                apiModelId: 'local-model',
                lmStudioModelId: 'local-model'
            });
            expect(handlerWithoutUrl).toBeInstanceOf(LmStudioHandler);
        });
    });

    describe('createMessage', () => {
        const systemPrompt = 'You are a helpful assistant.';
        const messages: Anthropic.Messages.MessageParam[] = [
            {
                role: 'user',
                content: 'Hello!'
            }
        ];

        it('should handle streaming responses', async () => {
            const stream = handler.createMessage(systemPrompt, messages);
            const chunks: any[] = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks.length).toBeGreaterThan(0);
            const textChunks = chunks.filter(chunk => chunk.type === 'text');
            expect(textChunks).toHaveLength(1);
            expect(textChunks[0].text).toBe('Test response');
        });

        it('should handle API errors', async () => {
            mockCreate.mockRejectedValueOnce(new Error('API Error'));

            const stream = handler.createMessage(systemPrompt, messages);

            await expect(async () => {
                for await (const chunk of stream) {
                    // Should not reach here
                }
            }).rejects.toThrow('Please check the LM Studio developer logs to debug what went wrong');
        });
    });

    describe('completePrompt', () => {
        it('should complete prompt successfully', async () => {
            const result = await handler.completePrompt('Test prompt');
            expect(result).toBe('Test response');
            expect(mockCreate).toHaveBeenCalledWith({
                model: mockOptions.lmStudioModelId,
                messages: [{ role: 'user', content: 'Test prompt' }],
                temperature: 0,
                stream: false
            });
        });

        it('should handle API errors', async () => {
            mockCreate.mockRejectedValueOnce(new Error('API Error'));
            await expect(handler.completePrompt('Test prompt'))
                .rejects.toThrow('Please check the LM Studio developer logs to debug what went wrong');
        });

        it('should handle empty response', async () => {
            mockCreate.mockResolvedValueOnce({
                choices: [{ message: { content: '' } }]
            });
            const result = await handler.completePrompt('Test prompt');
            expect(result).toBe('');
        });
    });

    describe('getModel', () => {
        it('should return model info', () => {
            const modelInfo = handler.getModel();
            expect(modelInfo.id).toBe(mockOptions.lmStudioModelId);
            expect(modelInfo.info).toBeDefined();
            expect(modelInfo.info.maxTokens).toBe(-1);
            expect(modelInfo.info.contextWindow).toBe(128_000);
        });
    });
});