mirror of https://github.com/pacnpal/Roo-Code.git (synced 2025-12-21 21:01:06 -05:00)
Add prompt cache
@@ -411,10 +411,25 @@ export class ClaudeDev {
 		}
 	}
 
-	calculateApiCost(inputTokens: number, outputTokens: number): number {
-		const inputCost = (this.api.getModel().info.inputPrice / 1_000_000) * inputTokens
+	calculateApiCost(
+		inputTokens: number,
+		outputTokens: number,
+		cacheCreationInputTokens?: number,
+		cacheReadInputTokens?: number
+	): number {
+		const modelCacheWritesPrice = this.api.getModel().info.cacheWrites
+		let cacheWritesCost = 0
+		if (cacheCreationInputTokens && modelCacheWritesPrice) {
+			cacheWritesCost = (modelCacheWritesPrice / 1_000_000) * cacheCreationInputTokens
+		}
+		const modelCacheReadsPrice = this.api.getModel().info.cacheReads
+		let cacheReadsCost = 0
+		if (cacheReadInputTokens && modelCacheReadsPrice) {
+			cacheReadsCost = (modelCacheReadsPrice / 1_000_000) * cacheReadInputTokens
+		}
+		const baseInputCost = (this.api.getModel().info.inputPrice / 1_000_000) * inputTokens
 		const outputCost = (this.api.getModel().info.outputPrice / 1_000_000) * outputTokens
-		const totalCost = inputCost + outputCost
+		const totalCost = cacheWritesCost + cacheReadsCost + baseInputCost + outputCost
 		return totalCost
 	}
 
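Not part of the commit, just a quick sanity check on the formula above: a standalone TypeScript sketch with invented token counts, using the claude-3-5-sonnet prices this same commit adds below ($3/M input, $15/M output, $3.75/M cache writes, $0.30/M cache reads).

// Illustrative only; token counts are made up, prices are per million tokens.
const inputPrice = 3.0
const outputPrice = 15.0
const cacheWritesPrice = 3.75
const cacheReadsPrice = 0.3

const inputTokens = 1_000 // uncached input
const outputTokens = 2_000
const cacheCreationInputTokens = 10_000 // prefix written to the cache on this request
const cacheReadInputTokens = 0

const totalCost =
	(cacheWritesPrice / 1_000_000) * cacheCreationInputTokens + // 0.0375
	(cacheReadsPrice / 1_000_000) * cacheReadInputTokens + // 0
	(inputPrice / 1_000_000) * inputTokens + // 0.003
	(outputPrice / 1_000_000) * outputTokens // 0.03
console.log(totalCost.toFixed(4)) // "0.0705"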
@@ -901,6 +916,7 @@ export class ClaudeDev {
 		try {
 			let systemPrompt = SYSTEM_PROMPT()
 			if (this.customInstructions && this.customInstructions.trim()) {
+				// altering the system prompt mid-task will break the prompt cache, but in the grand scheme this will not change often so it's better to not pollute user messages with it the way we have to with <potentially relevant details>
 				systemPrompt += `
 ====
 
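A short aside on why the append happens here rather than inside user messages (a hedged sketch with stand-in strings, not the commit's actual template): the Anthropic prompt cache matches on an exact prefix, so the system block has to stay byte-identical across every request in a task.

// SYSTEM_PROMPT() and the custom-instructions text below are assumptions for illustration.
const SYSTEM_PROMPT = () => "You are Claude Dev..."
const customInstructions = "Answer concisely."

let systemPrompt = SYSTEM_PROMPT()
if (customInstructions.trim()) {
	// Appended once and reused verbatim for the whole task, so it lives inside the
	// cached system block; changing it mid-task would force a fresh cache write.
	systemPrompt += `\n====\n\n${customInstructions.trim()}`
}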
@@ -975,12 +991,25 @@ ${this.customInstructions.trim()}
 				let assistantResponses: Anthropic.Messages.ContentBlock[] = []
 				let inputTokens = response.usage.input_tokens
 				let outputTokens = response.usage.output_tokens
+				let cacheCreationInputTokens =
+					(response as Anthropic.Beta.PromptCaching.Messages.PromptCachingBetaMessage).usage
+						.cache_creation_input_tokens || undefined
+				let cacheReadInputTokens =
+					(response as Anthropic.Beta.PromptCaching.Messages.PromptCachingBetaMessage).usage
+						.cache_read_input_tokens || undefined
 				await this.say(
 					"api_req_finished",
 					JSON.stringify({
 						tokensIn: inputTokens,
 						tokensOut: outputTokens,
-						cost: this.calculateApiCost(inputTokens, outputTokens),
+						cacheWrites: cacheCreationInputTokens,
+						cacheReads: cacheReadInputTokens,
+						cost: this.calculateApiCost(
+							inputTokens,
+							outputTokens,
+							cacheCreationInputTokens,
+							cacheReadInputTokens
+						),
 					})
 				)
 
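One detail worth noting in the usage handling above: `|| undefined` turns a reported 0 into undefined, so zero-valued cache counters are omitted from the api_req_finished payload and skipped by the `if (tokens && price)` guards in calculateApiCost. A tiny illustrative snippet (values invented):

const usage = { cache_creation_input_tokens: 0, cache_read_input_tokens: 8_192 } // illustrative values
const cacheCreationInputTokens = usage.cache_creation_input_tokens || undefined // undefined
const cacheReadInputTokens = usage.cache_read_input_tokens || undefined // 8192
console.log(cacheCreationInputTokens, cacheReadInputTokens)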
@@ -16,23 +16,82 @@ export class AnthropicHandler implements ApiHandler {
 		messages: Anthropic.Messages.MessageParam[],
 		tools: Anthropic.Messages.Tool[]
 	): Promise<Anthropic.Messages.Message> {
-		return await this.client.messages.create(
-			{
-				model: this.getModel().id,
-				max_tokens: this.getModel().info.maxTokens,
-				system: systemPrompt,
-				messages,
-				tools,
-				tool_choice: { type: "auto" },
-			},
-			// https://x.com/alexalbert__/status/1812921642143900036
-			// https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
-			this.getModel().id === "claude-3-5-sonnet-20240620"
-				? {
-						headers: { "anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15" },
-				  }
-				: undefined
-		)
+		const modelId = this.getModel().id
+		switch (modelId) {
+			case "claude-3-5-sonnet-20240620":
+			case "claude-3-haiku-20240307":
+				/*
+				The latest message will be the new user message, one before will be the assistant message from a previous request, and the user message before that will be a previously cached user message. So we need to mark the latest user message as ephemeral to cache it for the next request, and mark the second to last user message as ephemeral to let the server know the last message to retrieve from the cache for the current request..
+				*/
+				const userMsgIndices = messages.reduce(
+					(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
+					[] as number[]
+				)
+				const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1
+				const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1
+				return await this.client.beta.promptCaching.messages.create(
+					{
+						model: modelId,
+						max_tokens: this.getModel().info.maxTokens,
+						system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }],
+						messages: messages.map((message, index) => {
+							if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
+								return {
+									...message,
+									content:
+										typeof message.content === "string"
+											? [
+													{
+														type: "text",
+														text: message.content,
+														cache_control: { type: "ephemeral" },
+													},
+											  ]
+											: message.content.map((content, contentIndex) =>
													contentIndex === message.content.length - 1
														? { ...content, cache_control: { type: "ephemeral" } }
														: content
											  ),
								}
							}
							return message
						}),
+						tools: tools.map((tool, index) =>
+							index === tools.length - 1 ? { ...tool, cache_control: { type: "ephemeral" } } : tool
+						),
+						tool_choice: { type: "auto" },
+					},
+					(() => {
+						// 8192 tokens: https://x.com/alexalbert__/status/1812921642143900036
+						// prompt caching: https://x.com/alexalbert__/status/1823751995901272068
+						// https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
+						// https://github.com/anthropics/anthropic-sdk-typescript/commit/c920b77fc67bd839bfeb6716ceab9d7c9bbe7393
+						switch (modelId) {
+							case "claude-3-5-sonnet-20240620":
+								return {
+									headers: {
+										"anthropic-beta": "prompt-caching-2024-07-31,max-tokens-3-5-sonnet-2024-07-15",
+									},
+								}
+							case "claude-3-haiku-20240307":
+								return {
+									headers: { "anthropic-beta": "prompt-caching-2024-07-31" },
+								}
+							default:
+								return undefined
+						}
+					})()
+				)
+			default:
+				return await this.client.messages.create({
+					model: modelId,
+					max_tokens: this.getModel().info.maxTokens,
+					system: [{ text: systemPrompt, type: "text" }],
+					messages,
+					tools,
+					tool_choice: { type: "auto" },
+				})
+		}
 	}
 
 	createUserReadableRequest(
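To make the comment about cache breakpoints concrete, here is a standalone sketch (toy message history, not from the repo) of the same index math, showing which messages end up with cache_control: { type: "ephemeral" }.

// Toy three-message history; roles follow the pattern described in the comment above.
type Msg = { role: "user" | "assistant"; content: string }
const messages: Msg[] = [
	{ role: "user", content: "first request" }, // previously cached user message
	{ role: "assistant", content: "first reply" },
	{ role: "user", content: "new follow-up" }, // latest user message
]
const userMsgIndices = messages.reduce(
	(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
	[] as number[]
) // [0, 2]
const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1 // 2: cached for the next request
const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1 // 0: marks where the existing cache ends
console.log(lastUserMsgIndex, secondLastMsgUserIndex)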
@@ -18,8 +18,11 @@ export type ApiConfiguration = ApiHandlerOptions & {
 export interface ModelInfo {
 	maxTokens: number
 	supportsImages: boolean
+	supportsPromptCache: boolean
 	inputPrice: number
 	outputPrice: number
+	cacheWrites?: number
+	cacheReads?: number
 }
 
 export type ApiModelId = AnthropicModelId | OpenRouterModelId | BedrockModelId
@@ -32,26 +35,36 @@ export const anthropicModels = {
 	"claude-3-5-sonnet-20240620": {
 		maxTokens: 8192,
 		supportsImages: true,
+		supportsPromptCache: true,
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
+		cacheWrites: 3.75, // $3.75 per million tokens
+		cacheReads: 0.3, // $0.30 per million tokens
 	},
 	"claude-3-opus-20240229": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15.0,
 		outputPrice: 75.0,
+		cacheWrites: 18.75,
+		cacheReads: 1.5,
 	},
 	"claude-3-sonnet-20240229": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"claude-3-haiku-20240307": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: true,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
+		cacheWrites: 0.3,
+		cacheReads: 0.03,
 	},
 } as const satisfies Record<string, ModelInfo> // as const assertion makes the object deeply readonly
 
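Rough break-even intuition for the claude-3-5-sonnet prices above (illustrative arithmetic, not from the repo): a cache write costs 1.25x the normal input rate while a cache read costs 0.1x, so a reused prefix pays for itself from the second request onward.

const tokens = 10_000 // size of the cached prefix, invented for illustration
const uncached = (requests: number) => requests * tokens * (3.0 / 1_000_000) // every request pays the full input price
const cached = (requests: number) =>
	tokens * (3.75 / 1_000_000) + (requests - 1) * tokens * (0.3 / 1_000_000) // one write, then reads
console.log(uncached(1).toFixed(4), cached(1).toFixed(4)) // "0.0300" vs "0.0375" (a single request costs more with caching)
console.log(uncached(2).toFixed(4), cached(2).toFixed(4)) // "0.0600" vs "0.0405"
console.log(uncached(5).toFixed(4), cached(5).toFixed(4)) // "0.1500" vs "0.0495"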
@@ -63,24 +76,28 @@ export const bedrockModels = {
 	"anthropic.claude-3-5-sonnet-20240620-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic.claude-3-opus-20240229-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 	},
 	"anthropic.claude-3-sonnet-20240229-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic.claude-3-haiku-20240307-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
@@ -94,42 +111,49 @@ export const openRouterModels = {
 	"anthropic/claude-3.5-sonnet:beta": {
 		maxTokens: 8192,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic/claude-3-opus:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15,
 		outputPrice: 75,
 	},
 	"anthropic/claude-3-sonnet:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3,
 		outputPrice: 15,
 	},
 	"anthropic/claude-3-haiku:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
 	"openai/gpt-4o-2024-08-06": {
 		maxTokens: 16384,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 2.5,
 		outputPrice: 10,
 	},
 	"openai/gpt-4o-mini-2024-07-18": {
 		maxTokens: 16384,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 	},
 	"openai/gpt-4-turbo": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 10,
 		outputPrice: 30,
 	},
@@ -175,6 +199,7 @@ export const openRouterModels = {
 	"deepseek/deepseek-coder": {
 		maxTokens: 4096,
 		supportsImages: false,
+		supportsPromptCache: false,
 		inputPrice: 0.14,
 		outputPrice: 0.28,
 	},
@@ -182,6 +207,7 @@ export const openRouterModels = {
 	"mistralai/mistral-large": {
 		maxTokens: 8192,
 		supportsImages: false,
+		supportsPromptCache: false,
 		inputPrice: 3,
 		outputPrice: 9,
 	},