From ec2bfa352a6334bd1c3bd848ac952b77ad611b9f Mon Sep 17 00:00:00 2001
From: Saoud Rizwan <7799382+saoudrizwan@users.noreply.github.com>
Date: Thu, 15 Aug 2024 03:34:54 -0400
Subject: [PATCH] Add prompt cache

---
 package-lock.json                        | 25 ++-----
 package.json                             |  4 +-
 src/ClaudeDev.ts                         | 37 +++++++++-
 src/api/anthropic.ts                     | 93 +++++++++++++++++++-----
 src/shared/api.ts                        | 26 +++++++
 webview-ui/src/App.tsx                   |  1 +
 webview-ui/src/components/ApiOptions.tsx | 58 ++++++++++-----
 webview-ui/src/components/ChatView.tsx   |  5 ++
 webview-ui/src/components/TaskHeader.tsx | 34 ++++++++-
 webview-ui/src/utils/getApiMetrics.ts    | 16 +++-
 10 files changed, 235 insertions(+), 64 deletions(-)

diff --git a/package-lock.json b/package-lock.json
index dcc9792..40ae2e5 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,16 +1,16 @@
 {
 	"name": "claude-dev",
-	"version": "1.0.98",
+	"version": "1.1.15",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "claude-dev",
-			"version": "1.0.98",
+			"version": "1.1.15",
 			"license": "MIT",
 			"dependencies": {
 				"@anthropic-ai/bedrock-sdk": "^0.10.2",
-				"@anthropic-ai/sdk": "^0.24.3",
+				"@anthropic-ai/sdk": "^0.26.0",
 				"@vscode/codicons": "^0.0.36",
 				"default-shell": "^2.2.0",
 				"delay": "^6.0.0",
@@ -62,10 +62,9 @@
 			}
 		},
 		"node_modules/@anthropic-ai/sdk": {
-			"version": "0.24.3",
-			"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.24.3.tgz",
-			"integrity": "sha512-916wJXO6T6k8R6BAAcLhLPv/pnLGy7YSEBZXZ1XTFbLcTZE8oTy3oDW9WJf9KKZwMvVcePIfoTSvzXHRcGxkQQ==",
-			"license": "MIT",
+			"version": "0.26.0",
+			"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.26.0.tgz",
+			"integrity": "sha512-vNbZ2rnnMfk8Bf4OdeVy6GA4EXao8tGC0tLEoSAl1NZrip9oOxnEGUkXl3FsPQgeBM5hmpGE1tSLuu9HEVJiHg==",
 			"dependencies": {
 				"@types/node": "^18.11.18",
 				"@types/node-fetch": "^2.6.4",
@@ -73,8 +72,7 @@
 				"agentkeepalive": "^4.2.1",
 				"form-data-encoder": "1.7.2",
 				"formdata-node": "^4.3.2",
-				"node-fetch": "^2.6.7",
-				"web-streams-polyfill": "^3.2.1"
+				"node-fetch": "^2.6.7"
 			}
 		},
 		"node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
@@ -9642,15 +9640,6 @@
 				"spdx-expression-parse": "^3.0.0"
 			}
 		},
-		"node_modules/web-streams-polyfill": {
-			"version": "3.3.3",
-			"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz",
-			"integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==",
-			"license": "MIT",
-			"engines": {
-				"node": ">= 8"
-			}
-		},
 		"node_modules/web-tree-sitter": {
 			"version": "0.22.6",
 			"resolved": "https://registry.npmjs.org/web-tree-sitter/-/web-tree-sitter-0.22.6.tgz",
diff --git a/package.json b/package.json
index 44ed0b3..a0811a3 100644
--- a/package.json
+++ b/package.json
@@ -123,7 +123,7 @@
 	},
 	"dependencies": {
 		"@anthropic-ai/bedrock-sdk": "^0.10.2",
-		"@anthropic-ai/sdk": "^0.24.3",
+		"@anthropic-ai/sdk": "^0.26.0",
 		"@vscode/codicons": "^0.0.36",
 		"default-shell": "^2.2.0",
 		"delay": "^6.0.0",
@@ -138,4 +138,4 @@
 		"tree-sitter-wasms": "^0.1.11",
 		"web-tree-sitter": "^0.22.6"
 	}
-}
\ No newline at end of file
+}
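The SDK bump from ^0.24.3 to ^0.26.0 is what the rest of the patch depends on: prompt caching is exposed through a beta namespace on the client, the same surface the handler below calls. A minimal standalone sketch of that call shape, assuming ANTHROPIC_API_KEY is set in the environment and with a placeholder system prompt (the handler below also passes the anthropic-beta headers explicitly):

import Anthropic from "@anthropic-ai/sdk"

// Sketch of the beta prompt-caching surface this patch adopts. The system
// block is marked ephemeral so the server caches it as part of the prefix.
async function demo() {
	const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY })
	const response = await client.beta.promptCaching.messages.create({
		model: "claude-3-5-sonnet-20240620",
		max_tokens: 8192,
		system: [{ type: "text", text: "placeholder system prompt", cache_control: { type: "ephemeral" } }],
		messages: [{ role: "user", content: "Hello" }],
	})
	// Cache activity is reported in two usage fields that plain requests lack:
	console.log(response.usage.cache_creation_input_tokens, response.usage.cache_read_input_tokens)
}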
diff --git a/src/ClaudeDev.ts b/src/ClaudeDev.ts
index 361ced1..e1c1eeb 100644
--- a/src/ClaudeDev.ts
+++ b/src/ClaudeDev.ts
@@ -411,10 +411,25 @@ export class ClaudeDev {
 		}
 	}

-	calculateApiCost(inputTokens: number, outputTokens: number): number {
-		const inputCost = (this.api.getModel().info.inputPrice / 1_000_000) * inputTokens
+	calculateApiCost(
+		inputTokens: number,
+		outputTokens: number,
+		cacheCreationInputTokens?: number,
+		cacheReadInputTokens?: number
+	): number {
+		const modelCacheWritesPrice = this.api.getModel().info.cacheWrites
+		let cacheWritesCost = 0
+		if (cacheCreationInputTokens && modelCacheWritesPrice) {
+			cacheWritesCost = (modelCacheWritesPrice / 1_000_000) * cacheCreationInputTokens
+		}
+		const modelCacheReadsPrice = this.api.getModel().info.cacheReads
+		let cacheReadsCost = 0
+		if (cacheReadInputTokens && modelCacheReadsPrice) {
+			cacheReadsCost = (modelCacheReadsPrice / 1_000_000) * cacheReadInputTokens
+		}
+		const baseInputCost = (this.api.getModel().info.inputPrice / 1_000_000) * inputTokens
 		const outputCost = (this.api.getModel().info.outputPrice / 1_000_000) * outputTokens
-		const totalCost = inputCost + outputCost
+		const totalCost = cacheWritesCost + cacheReadsCost + baseInputCost + outputCost
 		return totalCost
 	}
@@ -901,6 +916,7 @@ export class ClaudeDev {
 		try {
 			let systemPrompt = SYSTEM_PROMPT()
 			if (this.customInstructions && this.customInstructions.trim()) {
+				// altering the system prompt mid-task will break the prompt cache, but in the grand scheme this will not change often so it's better to not pollute user messages with it the way we have to with
 				systemPrompt += `
 ====
@@ -975,12 +991,25 @@ ${this.customInstructions.trim()}
 		let assistantResponses: Anthropic.Messages.ContentBlock[] = []
 		let inputTokens = response.usage.input_tokens
 		let outputTokens = response.usage.output_tokens
+		let cacheCreationInputTokens =
+			(response as Anthropic.Beta.PromptCaching.Messages.PromptCachingBetaMessage).usage
+				.cache_creation_input_tokens || undefined
+		let cacheReadInputTokens =
+			(response as Anthropic.Beta.PromptCaching.Messages.PromptCachingBetaMessage).usage
+				.cache_read_input_tokens || undefined
 		await this.say(
 			"api_req_finished",
 			JSON.stringify({
 				tokensIn: inputTokens,
 				tokensOut: outputTokens,
-				cost: this.calculateApiCost(inputTokens, outputTokens),
+				cacheWrites: cacheCreationInputTokens,
+				cacheReads: cacheReadInputTokens,
+				cost: this.calculateApiCost(
+					inputTokens,
+					outputTokens,
+					cacheCreationInputTokens,
+					cacheReadInputTokens
+				),
 			})
 		)
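The new calculateApiCost prices four separate token pools: cache writes, cache reads, uncached input, and output. A self-contained sketch of the same arithmetic, with a hypothetical Pricing shape standing in for this.api.getModel().info and the per-million-token Sonnet prices this patch adds in src/shared/api.ts:

// Standalone sketch of the cost math above; "Pricing" and "sonnetPricing"
// are stand-ins for ModelInfo and the claude-3-5-sonnet-20240620 entry.
interface Pricing {
	inputPrice: number // $ per million input tokens
	outputPrice: number // $ per million output tokens
	cacheWrites?: number // $ per million tokens written to the cache
	cacheReads?: number // $ per million tokens read from the cache
}

const sonnetPricing: Pricing = { inputPrice: 3.0, outputPrice: 15.0, cacheWrites: 3.75, cacheReads: 0.3 }

function calculateApiCost(
	pricing: Pricing,
	inputTokens: number,
	outputTokens: number,
	cacheCreationInputTokens?: number,
	cacheReadInputTokens?: number
): number {
	const cacheWritesCost =
		cacheCreationInputTokens && pricing.cacheWrites
			? (pricing.cacheWrites / 1_000_000) * cacheCreationInputTokens
			: 0
	const cacheReadsCost =
		cacheeReadHelper(pricing.cacheReads, cacheReadInputTokens)
	const baseInputCost = (pricing.inputPrice / 1_000_000) * inputTokens
	const outputCost = (pricing.outputPrice / 1_000_000) * outputTokens
	return cacheWritesCost + cacheReadsCost + baseInputCost + outputCost
}

function cacheeReadHelper(price?: number, tokens?: number): number {
	return tokens && price ? (price / 1_000_000) * tokens : 0
}

// e.g. a follow-up request that reads 10,000 cached tokens instead of resending them:
// calculateApiCost(sonnetPricing, 500, 800, 0, 10_000)
// = 500*3/1e6 + 800*15/1e6 + 10_000*0.3/1e6 = $0.0015 + $0.012 + $0.003 = $0.0165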
diff --git a/src/api/anthropic.ts b/src/api/anthropic.ts
index 0ff8fba..0bb4b84 100644
--- a/src/api/anthropic.ts
+++ b/src/api/anthropic.ts
@@ -16,23 +16,82 @@ export class AnthropicHandler implements ApiHandler {
 		messages: Anthropic.Messages.MessageParam[],
 		tools: Anthropic.Messages.Tool[]
 	): Promise<Anthropic.Messages.Message> {
-		return await this.client.messages.create(
-			{
-				model: this.getModel().id,
-				max_tokens: this.getModel().info.maxTokens,
-				system: systemPrompt,
-				messages,
-				tools,
-				tool_choice: { type: "auto" },
-			},
-			// https://x.com/alexalbert__/status/1812921642143900036
-			// https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
-			this.getModel().id === "claude-3-5-sonnet-20240620"
-				? {
-						headers: { "anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15" },
-				  }
-				: undefined
-		)
+		const modelId = this.getModel().id
+		switch (modelId) {
+			case "claude-3-5-sonnet-20240620":
+			case "claude-3-haiku-20240307":
+				/*
+				The latest message will be the new user message, the one before will be the assistant message from the previous request, and the user message before that will be a previously cached user message. So we need to mark the latest user message as ephemeral to cache it for the next request, and mark the second-to-last user message as ephemeral to let the server know the last message to retrieve from the cache for the current request.
+				*/
+				const userMsgIndices = messages.reduce(
+					(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
+					[] as number[]
+				)
+				const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1
+				const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1
+				return await this.client.beta.promptCaching.messages.create(
+					{
+						model: modelId,
+						max_tokens: this.getModel().info.maxTokens,
+						system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }],
+						messages: messages.map((message, index) => {
+							if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
+								return {
+									...message,
+									content:
+										typeof message.content === "string"
+											? [
+													{
+														type: "text",
+														text: message.content,
+														cache_control: { type: "ephemeral" },
+													},
+											  ]
+											: message.content.map((content, contentIndex) =>
+													contentIndex === message.content.length - 1
+														? { ...content, cache_control: { type: "ephemeral" } }
+														: content
+											  ),
+								}
+							}
+							return message
+						}),
+						tools: tools.map((tool, index) =>
+							index === tools.length - 1 ? { ...tool, cache_control: { type: "ephemeral" } } : tool
+						),
+						tool_choice: { type: "auto" },
+					},
+					(() => {
+						// 8192 tokens: https://x.com/alexalbert__/status/1812921642143900036
+						// prompt caching: https://x.com/alexalbert__/status/1823751995901272068
+						// https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
+						// https://github.com/anthropics/anthropic-sdk-typescript/commit/c920b77fc67bd839bfeb6716ceab9d7c9bbe7393
+						switch (modelId) {
+							case "claude-3-5-sonnet-20240620":
+								return {
+									headers: {
+										"anthropic-beta": "prompt-caching-2024-07-31,max-tokens-3-5-sonnet-2024-07-15",
+									},
+								}
+							case "claude-3-haiku-20240307":
+								return {
+									headers: { "anthropic-beta": "prompt-caching-2024-07-31" },
+								}
+							default:
+								return undefined
+						}
+					})()
+				)
+			default:
+				return await this.client.messages.create({
+					model: modelId,
+					max_tokens: this.getModel().info.maxTokens,
+					system: [{ text: systemPrompt, type: "text" }],
+					messages,
+					tools,
+					tool_choice: { type: "auto" },
+				})
+		}
 	}

 	createUserReadableRequest(
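The two ephemeral user-message breakpoints are the subtle part of the hunk above: tagging the last user message writes this turn's full prefix into the cache for the next request, while tagging the second-to-last tells the server where the previously cached prefix ends for the current one. A toy illustration of which message indices end up tagged, assuming the alternating user/assistant structure the comment describes (the example conversation is invented):

import Anthropic from "@anthropic-ai/sdk"

// Illustration only: report which message indices the handler above would
// mark with cache_control: { type: "ephemeral" }. The system block and the
// last tool are always marked; among messages, only the last two user turns.
function ephemeralUserIndices(messages: Anthropic.Messages.MessageParam[]): number[] {
	const userMsgIndices = messages.reduce(
		(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
		[] as number[]
	)
	// last entry = cache this turn's prefix for next time;
	// second-to-last entry = prefix to read back from the cache right now
	return userMsgIndices.slice(-2)
}

const example: Anthropic.Messages.MessageParam[] = [
	{ role: "user", content: "<task>fix the build</task>" },
	{ role: "assistant", content: "Reading package.json..." },
	{ role: "user", content: "[tool result] { ... }" },
]
console.log(ephemeralUserIndices(example)) // [0, 2]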
diff --git a/src/shared/api.ts b/src/shared/api.ts
index 068f874..82ec5b2 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -18,8 +18,11 @@ export type ApiConfiguration = ApiHandlerOptions & {
 export interface ModelInfo {
 	maxTokens: number
 	supportsImages: boolean
+	supportsPromptCache: boolean
 	inputPrice: number
 	outputPrice: number
+	cacheWrites?: number
+	cacheReads?: number
 }

 export type ApiModelId = AnthropicModelId | OpenRouterModelId | BedrockModelId
@@ -32,26 +35,36 @@
 	"claude-3-5-sonnet-20240620": {
 		maxTokens: 8192,
 		supportsImages: true,
+		supportsPromptCache: true,
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
+		cacheWrites: 3.75, // $3.75 per million tokens
+		cacheReads: 0.3, // $0.30 per million tokens
 	},
 	"claude-3-opus-20240229": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15.0,
 		outputPrice: 75.0,
+		cacheWrites: 18.75,
+		cacheReads: 1.5,
 	},
 	"claude-3-sonnet-20240229": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"claude-3-haiku-20240307": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: true,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
+		cacheWrites: 0.3,
+		cacheReads: 0.03,
 	},
 } as const satisfies Record<string, ModelInfo> // as const assertion makes the object deeply readonly
@@ -63,24 +76,28 @@ export const bedrockModels = {
 	"anthropic.claude-3-5-sonnet-20240620-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic.claude-3-opus-20240229-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 	},
 	"anthropic.claude-3-sonnet-20240229-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic.claude-3-haiku-20240307-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
@@ -94,42 +111,49 @@
 	"anthropic/claude-3.5-sonnet:beta": {
 		maxTokens: 8192,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic/claude-3-opus:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15,
 		outputPrice: 75,
 	},
 	"anthropic/claude-3-sonnet:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3,
 		outputPrice: 15,
 	},
 	"anthropic/claude-3-haiku:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
 	"openai/gpt-4o-2024-08-06": {
 		maxTokens: 16384,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 2.5,
 		outputPrice: 10,
 	},
 	"openai/gpt-4o-mini-2024-07-18": {
 		maxTokens: 16384,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 	},
 	"openai/gpt-4-turbo": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 10,
 		outputPrice: 30,
 	},
@@ -175,6 +199,7 @@ export const openRouterModels = {
 	"deepseek/deepseek-coder": {
 		maxTokens: 4096,
 		supportsImages: false,
+		supportsPromptCache: false,
 		inputPrice: 0.14,
 		outputPrice: 0.28,
 	},
@@ -182,6 +207,7 @@
 	"mistralai/mistral-large": {
 		maxTokens: 8192,
 		supportsImages: false,
+		supportsPromptCache: false,
 		inputPrice: 3,
 		outputPrice: 9,
 	},
diff --git a/webview-ui/src/App.tsx b/webview-ui/src/App.tsx
index f8cbd30..63e863e 100644
--- a/webview-ui/src/App.tsx
+++ b/webview-ui/src/App.tsx
@@ -105,6 +105,7 @@ const App: React.FC = () => {
 				<ChatView
 					vscodeThemeName={vscodeThemeName}
 					showAnnouncement={showAnnouncement}
 					selectedModelSupportsImages={selectedModelInfo.supportsImages}
+					selectedModelSupportsPromptCache={selectedModelInfo.supportsPromptCache}
 					hideAnnouncement={() => setShowAnnouncement(false)}
 				/>
diff --git a/webview-ui/src/components/ApiOptions.tsx b/webview-ui/src/components/ApiOptions.tsx
index b046c31..5232a83 100644
--- a/webview-ui/src/components/ApiOptions.tsx
+++ b/webview-ui/src/components/ApiOptions.tsx
@@ -217,25 +217,17 @@ const ModelInfoView = ({ modelInfo }: { modelInfo: ModelInfo }) => {
 	return (
 		<div style={{ fontSize: "12px", marginTop: "2px", color: "var(--vscode-descriptionForeground)" }}>
-			<span
-				style={{ color: modelInfo.supportsImages ? "var(--vscode-terminal-ansiGreen)" : "var(--vscode-errorForeground)" }}>
-				{modelInfo.supportsImages ? "Supports images" : "Does not support images"}
-			</span>
+			<ModelInfoSupportsItem
+				isSupported={modelInfo.supportsImages}
+				supportsLabel="Supports images"
+				doesNotSupportLabel="Does not support images"
+			/>
+			<br />
+			<ModelInfoSupportsItem
+				isSupported={modelInfo.supportsPromptCache}
+				supportsLabel="Supports prompt caching"
+				doesNotSupportLabel="Does not support prompt caching"
+			/>
 			<br />
 			<span style={{ fontWeight: 500 }}>Max output:</span> {modelInfo.maxTokens.toLocaleString()} tokens
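The cache pricing fields added in src/shared/api.ts above encode Anthropic's published multipliers: for Sonnet, writes at $3.75 are 1.25x the $3.00 input price and reads at $0.30 are 0.1x (Haiku's $0.30/$0.03 are rounded from the same scheme). That means a reused prefix recoups the write premium on its very first cache hit. A quick back-of-the-envelope check using the Sonnet numbers from the patch:

// Sanity check of the Sonnet prices above ($ per million tokens).
const inputPrice = 3.0
const cacheWrites = 3.75 // 1.25x input: one-time premium to write the prefix
const cacheReads = 0.3 // 0.1x input: every later hit on that prefix

// Cost of sending a 50k-token prefix twice, with vs. without caching:
const tokens = 50_000
const uncached = 2 * (inputPrice / 1_000_000) * tokens // $0.30
const cached = ((cacheWrites + cacheReads) / 1_000_000) * tokens // $0.2025
// Caching already wins on the second request, and each further request
// re-reads the prefix for $0.015 instead of $0.15.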
@@ -247,6 +239,34 @@ const ModelInfoView = ({ modelInfo }: { modelInfo: ModelInfo }) => {
 	)
 }

+const ModelInfoSupportsItem = ({
+	isSupported,
+	supportsLabel,
+	doesNotSupportLabel,
+}: {
+	isSupported: boolean
+	supportsLabel: string
+	doesNotSupportLabel: string
+}) => (
+	<span
+		style={{
+			fontWeight: 500,
+			color: isSupported ? "var(--vscode-terminal-ansiGreen)" : "var(--vscode-errorForeground)",
+		}}>
+		{isSupported ? supportsLabel : doesNotSupportLabel}
+	</span>
+)
+
 export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 	const provider = apiConfiguration?.apiProvider || "anthropic"
 	const modelId = apiConfiguration?.apiModelId
diff --git a/webview-ui/src/components/ChatView.tsx b/webview-ui/src/components/ChatView.tsx
index 4031142..f275dca 100644
--- a/webview-ui/src/components/ChatView.tsx
+++ b/webview-ui/src/components/ChatView.tsx
@@ -22,6 +22,7 @@ interface ChatViewProps {
 	vscodeThemeName?: string
 	showAnnouncement: boolean
 	selectedModelSupportsImages: boolean
+	selectedModelSupportsPromptCache: boolean
 	hideAnnouncement: () => void
 }

@@ -34,6 +35,7 @@ const ChatView = ({
 	vscodeThemeName,
 	showAnnouncement,
 	selectedModelSupportsImages,
+	selectedModelSupportsPromptCache,
 	hideAnnouncement,
 }: ChatViewProps) => {
 	//const task = messages.length > 0 ? (messages[0].say === "task" ? messages[0] : undefined) : undefined
@@ -448,6 +450,9 @@ const ChatView = ({
 				<TaskHeader
 					task={task}
 					tokensIn={apiMetrics.totalTokensIn}
 					tokensOut={apiMetrics.totalTokensOut}
+					doesModelSupportPromptCache={selectedModelSupportsPromptCache}
+					cacheWrites={apiMetrics.totalCacheWrites}
+					cacheReads={apiMetrics.totalCacheReads}
 					totalCost={apiMetrics.totalCost}
 					onClose={handleTaskCloseButtonClick}
 					isHidden={isHidden}
diff --git a/webview-ui/src/components/TaskHeader.tsx b/webview-ui/src/components/TaskHeader.tsx
index 7a71e40..e876d9e 100644
--- a/webview-ui/src/components/TaskHeader.tsx
+++ b/webview-ui/src/components/TaskHeader.tsx
@@ -9,12 +9,25 @@ interface TaskHeaderProps {
 	task: ClaudeMessage
 	tokensIn: number
 	tokensOut: number
+	doesModelSupportPromptCache: boolean
+	cacheWrites?: number
+	cacheReads?: number
 	totalCost: number
 	onClose: () => void
 	isHidden: boolean
 }

-const TaskHeader: React.FC<TaskHeaderProps> = ({ task, tokensIn, tokensOut, totalCost, onClose, isHidden }) => {
+const TaskHeader: React.FC<TaskHeaderProps> = ({
+	task,
+	tokensIn,
+	tokensOut,
+	doesModelSupportPromptCache,
+	cacheWrites,
+	cacheReads,
+	totalCost,
+	onClose,
+	isHidden,
+}) => {
 	const [isExpanded, setIsExpanded] = useState(false)
 	const [showSeeMore, setShowSeeMore] = useState(false)
 	const textContainerRef = useRef<HTMLDivElement>(null)
@@ -194,6 +207,25 @@ const TaskHeader: React.FC<TaskHeaderProps> = ({ task, tokensIn, tokensOut, tota
 						{tokensOut.toLocaleString()}
 					</span>
 				</div>
+				{(doesModelSupportPromptCache || cacheReads !== undefined || cacheWrites !== undefined) && (
+					<div
+						style={{
+							display: "flex",
+							alignItems: "center",
+							gap: "4px",
+							flexWrap: "wrap",
+						}}>
+						<span style={{ fontWeight: "bold" }}>Prompt Cache:</span>
+						<span style={{ display: "flex", alignItems: "center", gap: "3px" }}>
+							+{(cacheWrites || 0).toLocaleString()}
+						</span>
+						<span style={{ display: "flex", alignItems: "center", gap: "3px" }}>
+							{(cacheReads || 0).toLocaleString()}
+						</span>
+					</div>
+				)}
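The webview-ui/src/utils/getApiMetrics.ts hunk listed in the diffstat is not included in this excerpt, but ChatView above now reads apiMetrics.totalCacheWrites and apiMetrics.totalCacheReads, so the helper presumably sums the new cacheWrites/cacheReads fields from each api_req_finished payload alongside the existing token and cost totals. A hedged sketch of that aggregation — the ApiReqInfo shape mirrors the JSON written in ClaudeDev.ts above; everything else here is an assumption, not the actual hunk:

// Hedged sketch: the real getApiMetrics.ts change is not shown in this patch
// excerpt. Assumes each "api_req_finished" message carries the JSON payload
// produced by ClaudeDev.say() above. The real helper may leave the cache
// totals undefined when no request reported cache activity.
interface ApiReqInfo {
	tokensIn?: number
	tokensOut?: number
	cacheWrites?: number
	cacheReads?: number
	cost?: number
}

function getApiMetrics(payloads: ApiReqInfo[]) {
	return payloads.reduce(
		(acc, info) => ({
			totalTokensIn: acc.totalTokensIn + (info.tokensIn ?? 0),
			totalTokensOut: acc.totalTokensOut + (info.tokensOut ?? 0),
			totalCacheWrites: acc.totalCacheWrites + (info.cacheWrites ?? 0),
			totalCacheReads: acc.totalCacheReads + (info.cacheReads ?? 0),
			totalCost: acc.totalCost + (info.cost ?? 0),
		}),
		{ totalTokensIn: 0, totalTokensOut: 0, totalCacheWrites: 0, totalCacheReads: 0, totalCost: 0 }
	)
}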