Chore: Prettier for consistent formatting (#794)

* Chore: Prettier for consistent formatting

- TODO: This PR needs to be updated by Saoud after he runs `npm install` & `npm run format:fix` and commits the results of the prettier changes.

* Revert prettier config

* Run npm install

* Fix prettier config and ignore package lock

* Run format

---------

Co-authored-by: Saoud Rizwan <7799382+saoudrizwan@users.noreply.github.com>
This commit is contained in:
Mark Percival
2024-11-21 13:13:54 -08:00
committed by GitHub
parent 8d7e28c5e3
commit 93e70c62f1
65 changed files with 2101 additions and 2091 deletions

View File

@@ -36,7 +36,7 @@ export class AnthropicHandler implements ApiHandler {
*/
const userMsgIndices = messages.reduce(
(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
[] as number[]
[] as number[],
)
const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1
const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1
@@ -58,12 +58,12 @@ export class AnthropicHandler implements ApiHandler {
text: message.content,
cache_control: { type: "ephemeral" },
},
]
]
: message.content.map((content, contentIndex) =>
contentIndex === message.content.length - 1
? { ...content, cache_control: { type: "ephemeral" } }
: content
),
: content,
),
}
}
return message
@@ -88,7 +88,7 @@ export class AnthropicHandler implements ApiHandler {
default:
return undefined
}
})()
})(),
)
break
}

View File

@@ -42,7 +42,7 @@ export class LmStudioHandler implements ApiHandler {
} catch (error) {
// LM Studio doesn't return an error code/body for now
throw new Error(
"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Cline's prompts."
"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Cline's prompts.",
)
}
}

View File

@@ -19,7 +19,7 @@ export function convertAnthropicContentToGemini(
| Anthropic.Messages.ImageBlockParam
| Anthropic.Messages.ToolUseBlockParam
| Anthropic.Messages.ToolResultBlockParam
>
>,
): Part[] {
if (typeof content === "string") {
return [{ text: content } as TextPart]
@@ -83,7 +83,7 @@ export function convertAnthropicContentToGemini(
data: part.source.data,
mimeType: part.source.media_type,
},
} as InlineDataPart)
}) as InlineDataPart,
),
]
}
@@ -113,7 +113,7 @@ export function convertAnthropicToolToGemini(tool: Anthropic.Messages.Tool): Fun
type: (value as any).type.toUpperCase(),
description: (value as any).description || "",
},
])
]),
),
required: (tool.input_schema.required as string[]) || [],
},
@@ -133,7 +133,7 @@ export function unescapeGeminiContent(content: string) {
}
export function convertGeminiResponseToAnthropic(
response: EnhancedGenerateContentResponse
response: EnhancedGenerateContentResponse,
): Anthropic.Messages.Message {
const content: Anthropic.Messages.ContentBlock[] = []

View File

@@ -167,7 +167,7 @@ I've analyzed the project structure, but I need more information to proceed. Let
export function convertToO1Messages(
openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[],
systemPrompt: string
systemPrompt: string,
): OpenAI.Chat.ChatCompletionMessageParam[] {
const toolsReplaced = openAiMessages.reduce((acc, message) => {
if (message.role === "tool") {
@@ -360,7 +360,7 @@ function validateToolInput(toolName: string, tool_input: Record<string, string>)
// Convert OpenAI response to Anthropic format
export function convertO1ResponseToAnthropicMessage(
completion: OpenAI.Chat.Completions.ChatCompletion
completion: OpenAI.Chat.Completions.ChatCompletion,
): Anthropic.Messages.Message {
const openAiMessage = completion.choices[0].message
const { normalText, toolCalls } = parseAIResponse(openAiMessage.content || "")
@@ -405,7 +405,7 @@ export function convertO1ResponseToAnthropicMessage(
name: toolCall.tool,
input: toolCall.tool_input,
}
})
}),
)
}

View File

@@ -2,7 +2,7 @@ import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
export function convertToOpenAiMessages(
anthropicMessages: Anthropic.Messages.MessageParam[]
anthropicMessages: Anthropic.Messages.MessageParam[],
): OpenAI.Chat.ChatCompletionMessageParam[] {
const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = []
@@ -31,7 +31,7 @@ export function convertToOpenAiMessages(
} // user cannot send tool_use messages
return acc
},
{ nonToolMessages: [], toolMessages: [] }
{ nonToolMessages: [], toolMessages: [] },
)
// Process tool result messages FIRST since they must follow the tool use messages
@@ -105,7 +105,7 @@ export function convertToOpenAiMessages(
} // assistant cannot send tool_result messages
return acc
},
{ nonToolMessages: [], toolMessages: [] }
{ nonToolMessages: [], toolMessages: [] },
)
// Process non-tool messages
@@ -147,7 +147,7 @@ export function convertToOpenAiMessages(
// Convert OpenAI response to Anthropic format
export function convertToAnthropicMessage(
completion: OpenAI.Chat.Completions.ChatCompletion
completion: OpenAI.Chat.Completions.ChatCompletion,
): Anthropic.Messages.Message {
const openAiMessage = completion.choices[0].message
const anthropicMessage: Anthropic.Messages.Message = {
@@ -196,7 +196,7 @@ export function convertToAnthropicMessage(
name: toolCall.function.name,
input: parsedInput,
}
})
}),
)
}
return anthropicMessage