Remove truncation logic for now

commit 25987dd40b
parent eb8c4cc50f
Author: Matt Rubens
Date:   2024-12-29 11:27:24 -08:00

4 changed files with 47 additions and 157 deletions
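For context, the deleted test below exercised a sliding-window style truncation: old messages were dropped so the prompt fit the model's context window, and a notice chunk was streamed ahead of the response. A minimal sketch of that idea follows; the helper name, SketchMessage type, and ~4-characters-per-token heuristic are illustrative assumptions, not the repository's actual implementation.

// Sketch only (hypothetical helper, not this repo's code): drop the oldest
// messages until the estimated prompt fits the context window. Assumes a
// rough 4-characters-per-token heuristic, in line with the comments in the
// removed test ('a'.repeat(1000) treated as a few hundred tokens).
type SketchMessage = { role: 'user' | 'assistant'; content: string }

const estimateTokens = (text: string): number => Math.ceil(text.length / 4)

function truncateToContextWindow(
    systemPrompt: string,
    messages: SketchMessage[],
    contextWindow: number,
    maxTokens: number,
): { messages: SketchMessage[]; truncated: boolean } {
    // Reserve room for the system prompt and the model's reply.
    const budget = contextWindow - maxTokens - estimateTokens(systemPrompt)
    const kept: SketchMessage[] = []
    let used = 0
    // Walk newest-to-oldest so the most recent messages survive.
    for (let i = messages.length - 1; i >= 0; i--) {
        const cost = estimateTokens(messages[i].content)
        if (used + cost > budget) break
        kept.unshift(messages[i])
        used += cost
    }
    return { messages: kept, truncated: kept.length < messages.length }
}

When truncation occurred, the removed test expected a chunk containing "(Note: Some earlier messages were truncated to fit within the model's context window)" to be yielded before the model's response.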


@@ -5,19 +5,6 @@ import { Anthropic } from '@anthropic-ai/sdk'
 // Mock dependencies
 jest.mock('openai')
-jest.mock('../../../shared/api', () => ({
-    ...jest.requireActual('../../../shared/api'),
-    deepSeekModels: {
-        'deepseek-chat': {
-            maxTokens: 1000,
-            contextWindow: 2000,
-            supportsImages: false,
-            supportsPromptCache: false,
-            inputPrice: 0.014,
-            outputPrice: 0.28,
-        }
-    }
-}))

 describe('DeepSeekHandler', () => {
@@ -46,8 +33,8 @@ describe('DeepSeekHandler', () => {
 expect(result).toEqual({
     id: mockOptions.deepSeekModelId,
     info: expect.objectContaining({
-        maxTokens: 1000,
-        contextWindow: 2000,
+        maxTokens: 8192,
+        contextWindow: 64000,
         supportsPromptCache: false,
         supportsImages: false,
         inputPrice: 0.014,
@@ -61,7 +48,7 @@ describe('DeepSeekHandler', () => {
     const result = handler.getModel()
     expect(result.id).toBe('deepseek-chat')
-    expect(result.info.maxTokens).toBe(1000)
+    expect(result.info.maxTokens).toBe(8192)
 })

 test('createMessage handles string content correctly', async () => {
@@ -109,7 +96,7 @@ describe('DeepSeekHandler', () => {
         ],
         temperature: 0,
         stream: true,
-        max_tokens: 1000,
+        max_tokens: 8192,
         stream_options: { include_usage: true }
     }))
 })
@@ -155,83 +142,6 @@ describe('DeepSeekHandler', () => {
     }))
 })

-test('createMessage truncates messages when exceeding context window', async () => {
-    const handler = new DeepSeekHandler(mockOptions)
-    const longString = 'a'.repeat(1000) // ~300 tokens
-    const shortString = 'b'.repeat(100) // ~30 tokens
-    const systemPrompt = 'test system prompt'
-    const messages: Anthropic.Messages.MessageParam[] = [
-        { role: 'user', content: longString }, // Old message
-        { role: 'assistant', content: 'short response' },
-        { role: 'user', content: shortString } // Recent message
-    ]
-    const mockStream = {
-        async *[Symbol.asyncIterator]() {
-            yield {
-                choices: [{
-                    delta: {
-                        content: '(Note: Some earlier messages were truncated to fit within the model\'s context window)\n\n'
-                    }
-                }]
-            }
-            yield {
-                choices: [{
-                    delta: {
-                        content: 'test response'
-                    }
-                }]
-            }
-        }
-    }
-    const mockCreate = jest.fn().mockResolvedValue(mockStream)
-    ;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
-        completions: { create: mockCreate }
-    } as any
-    const generator = handler.createMessage(systemPrompt, messages)
-    const chunks = []
-    for await (const chunk of generator) {
-        chunks.push(chunk)
-    }
-    // Should get two chunks: truncation notice and response
-    expect(chunks).toHaveLength(2)
-    expect(chunks[0]).toEqual({
-        type: 'text',
-        text: expect.stringContaining('truncated')
-    })
-    expect(chunks[1]).toEqual({
-        type: 'text',
-        text: 'test response'
-    })
-    // Verify API call includes system prompt and recent messages, but not old message
-    expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
-        messages: expect.arrayContaining([
-            { role: 'system', content: systemPrompt },
-            { role: 'assistant', content: 'short response' },
-            { role: 'user', content: shortString }
-        ])
-    }))
-    // Verify truncation notice was included
-    expect(chunks[0]).toEqual({
-        type: 'text',
-        text: expect.stringContaining('truncated')
-    })
-    // Verify the messages array contains the expected messages
-    const calledMessages = mockCreate.mock.calls[0][0].messages
-    expect(calledMessages).toHaveLength(4)
-    expect(calledMessages[0]).toEqual({ role: 'system', content: systemPrompt })
-    expect(calledMessages[1]).toEqual({ role: 'user', content: longString })
-    expect(calledMessages[2]).toEqual({ role: 'assistant', content: 'short response' })
-    expect(calledMessages[3]).toEqual({ role: 'user', content: shortString })
-})

 test('createMessage handles API errors', async () => {
     const handler = new DeepSeekHandler(mockOptions)
     const mockStream = {