Add prompt cache

Saoud Rizwan
2024-08-15 03:34:54 -04:00
parent ace4596fe3
commit ec2bfa352a
10 changed files with 235 additions and 64 deletions

package-lock.json (generated)

@@ -1,16 +1,16 @@
 {
     "name": "claude-dev",
-    "version": "1.0.98",
+    "version": "1.1.15",
     "lockfileVersion": 3,
     "requires": true,
     "packages": {
         "": {
             "name": "claude-dev",
-            "version": "1.0.98",
+            "version": "1.1.15",
             "license": "MIT",
             "dependencies": {
                 "@anthropic-ai/bedrock-sdk": "^0.10.2",
-                "@anthropic-ai/sdk": "^0.24.3",
+                "@anthropic-ai/sdk": "^0.26.0",
                 "@vscode/codicons": "^0.0.36",
                 "default-shell": "^2.2.0",
                 "delay": "^6.0.0",
@@ -62,10 +62,9 @@
         }
     },
     "node_modules/@anthropic-ai/sdk": {
-        "version": "0.24.3",
-        "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.24.3.tgz",
-        "integrity": "sha512-916wJXO6T6k8R6BAAcLhLPv/pnLGy7YSEBZXZ1XTFbLcTZE8oTy3oDW9WJf9KKZwMvVcePIfoTSvzXHRcGxkQQ==",
-        "license": "MIT",
+        "version": "0.26.0",
+        "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.26.0.tgz",
+        "integrity": "sha512-vNbZ2rnnMfk8Bf4OdeVy6GA4EXao8tGC0tLEoSAl1NZrip9oOxnEGUkXl3FsPQgeBM5hmpGE1tSLuu9HEVJiHg==",
         "dependencies": {
             "@types/node": "^18.11.18",
             "@types/node-fetch": "^2.6.4",
@@ -73,8 +72,7 @@
             "agentkeepalive": "^4.2.1",
             "form-data-encoder": "1.7.2",
             "formdata-node": "^4.3.2",
-            "node-fetch": "^2.6.7",
-            "web-streams-polyfill": "^3.2.1"
+            "node-fetch": "^2.6.7"
         }
     },
     "node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
@@ -9642,15 +9640,6 @@
             "spdx-expression-parse": "^3.0.0"
         }
     },
-    "node_modules/web-streams-polyfill": {
-        "version": "3.3.3",
-        "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz",
-        "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==",
-        "license": "MIT",
-        "engines": {
-            "node": ">= 8"
-        }
-    },
     "node_modules/web-tree-sitter": {
         "version": "0.22.6",
         "resolved": "https://registry.npmjs.org/web-tree-sitter/-/web-tree-sitter-0.22.6.tgz",


@@ -123,7 +123,7 @@
     },
     "dependencies": {
         "@anthropic-ai/bedrock-sdk": "^0.10.2",
-        "@anthropic-ai/sdk": "^0.24.3",
+        "@anthropic-ai/sdk": "^0.26.0",
         "@vscode/codicons": "^0.0.36",
         "default-shell": "^2.2.0",
         "delay": "^6.0.0",


@@ -411,10 +411,25 @@ export class ClaudeDev {
         }
     }

-    calculateApiCost(inputTokens: number, outputTokens: number): number {
-        const inputCost = (this.api.getModel().info.inputPrice / 1_000_000) * inputTokens
+    calculateApiCost(
+        inputTokens: number,
+        outputTokens: number,
+        cacheCreationInputTokens?: number,
+        cacheReadInputTokens?: number
+    ): number {
+        const modelCacheWritesPrice = this.api.getModel().info.cacheWrites
+        let cacheWritesCost = 0
+        if (cacheCreationInputTokens && modelCacheWritesPrice) {
+            cacheWritesCost = (modelCacheWritesPrice / 1_000_000) * cacheCreationInputTokens
+        }
+        const modelCacheReadsPrice = this.api.getModel().info.cacheReads
+        let cacheReadsCost = 0
+        if (cacheReadInputTokens && modelCacheReadsPrice) {
+            cacheReadsCost = (modelCacheReadsPrice / 1_000_000) * cacheReadInputTokens
+        }
+        const baseInputCost = (this.api.getModel().info.inputPrice / 1_000_000) * inputTokens
         const outputCost = (this.api.getModel().info.outputPrice / 1_000_000) * outputTokens
-        const totalCost = inputCost + outputCost
+        const totalCost = cacheWritesCost + cacheReadsCost + baseInputCost + outputCost
         return totalCost
     }
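Worked example of the new formula, as a minimal standalone sketch (not part of the commit): it assumes the Claude 3.5 Sonnet prices this commit adds below ($3/M input, $15/M output, $3.75/M cache writes, $0.30/M cache reads), and the token counts are hypothetical.

// Standalone sketch of the cache-aware cost formula; prices are per
// million tokens, taken from the anthropicModels table added below.
const inputPrice = 3.0
const outputPrice = 15.0
const cacheWritesPrice = 3.75
const cacheReadsPrice = 0.3

function apiCost(
    inputTokens: number,
    outputTokens: number,
    cacheCreationInputTokens = 0,
    cacheReadInputTokens = 0
): number {
    const cacheWritesCost = (cacheWritesPrice / 1_000_000) * cacheCreationInputTokens
    const cacheReadsCost = (cacheReadsPrice / 1_000_000) * cacheReadInputTokens
    const baseInputCost = (inputPrice / 1_000_000) * inputTokens
    const outputCost = (outputPrice / 1_000_000) * outputTokens
    return cacheWritesCost + cacheReadsCost + baseInputCost + outputCost
}

// Request 1 writes a 10,000-token prefix to the cache:
// 0.0375 + 0.003 + 0.0075 = $0.048
console.log(apiCost(1_000, 500, 10_000, 0))

// Request 2 reads that prefix back at a tenth of the base input price:
// 0.003 + 0.003 + 0.0075 = $0.0135
console.log(apiCost(1_000, 500, 0, 10_000))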
@@ -901,6 +916,7 @@ export class ClaudeDev {
         try {
             let systemPrompt = SYSTEM_PROMPT()
             if (this.customInstructions && this.customInstructions.trim()) {
+                // altering the system prompt mid-task will break the prompt cache, but in the grand scheme this will not change often, so it's better to not pollute user messages with it the way we have to with <potentially relevant details>
                 systemPrompt += `

 ====
@@ -975,12 +991,25 @@ ${this.customInstructions.trim()}
             let assistantResponses: Anthropic.Messages.ContentBlock[] = []
             let inputTokens = response.usage.input_tokens
             let outputTokens = response.usage.output_tokens
+            let cacheCreationInputTokens =
+                (response as Anthropic.Beta.PromptCaching.Messages.PromptCachingBetaMessage).usage
+                    .cache_creation_input_tokens || undefined
+            let cacheReadInputTokens =
+                (response as Anthropic.Beta.PromptCaching.Messages.PromptCachingBetaMessage).usage
+                    .cache_read_input_tokens || undefined

             await this.say(
                 "api_req_finished",
                 JSON.stringify({
                     tokensIn: inputTokens,
                     tokensOut: outputTokens,
-                    cost: this.calculateApiCost(inputTokens, outputTokens),
+                    cacheWrites: cacheCreationInputTokens,
+                    cacheReads: cacheReadInputTokens,
+                    cost: this.calculateApiCost(
+                        inputTokens,
+                        outputTokens,
+                        cacheCreationInputTokens,
+                        cacheReadInputTokens
+                    ),
                 })
             )
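For reference, the JSON serialized into the api_req_finished message now carries the two cache counters alongside the token counts. A sketch of the shape (the field names are exactly those destructured by getApiMetrics at the end of this commit):

// Sketch of the api_req_finished payload; cacheWrites/cacheReads are
// left undefined when the response reports no cache activity.
interface ApiReqFinishedPayload {
    tokensIn: number
    tokensOut: number
    cacheWrites?: number // usage.cache_creation_input_tokens
    cacheReads?: number // usage.cache_read_input_tokens
    cost: number
}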


@@ -16,23 +16,82 @@ export class AnthropicHandler implements ApiHandler {
         messages: Anthropic.Messages.MessageParam[],
         tools: Anthropic.Messages.Tool[]
     ): Promise<Anthropic.Messages.Message> {
-        return await this.client.messages.create(
-            {
-                model: this.getModel().id,
-                max_tokens: this.getModel().info.maxTokens,
-                system: systemPrompt,
-                messages,
-                tools,
-                tool_choice: { type: "auto" },
-            },
-            // https://x.com/alexalbert__/status/1812921642143900036
-            // https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
-            this.getModel().id === "claude-3-5-sonnet-20240620"
-                ? {
-                      headers: { "anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15" },
-                  }
-                : undefined
-        )
+        const modelId = this.getModel().id
+        switch (modelId) {
+            case "claude-3-5-sonnet-20240620":
+            case "claude-3-haiku-20240307":
+                /*
+                The latest message will be the new user message, the one before it will be the assistant message from the previous request, and the user message before that will have been cached by the previous request. So we mark the latest user message as ephemeral to cache it for the next request, and mark the second-to-last user message as ephemeral to let the server know the last message to retrieve from the cache for the current request.
+                */
+                const userMsgIndices = messages.reduce(
+                    (acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
+                    [] as number[]
+                )
+                const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1
+                const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1
+                return await this.client.beta.promptCaching.messages.create(
+                    {
+                        model: modelId,
+                        max_tokens: this.getModel().info.maxTokens,
+                        system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }],
+                        messages: messages.map((message, index) => {
+                            if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
+                                return {
+                                    ...message,
+                                    content:
+                                        typeof message.content === "string"
+                                            ? [
+                                                  {
+                                                      type: "text",
+                                                      text: message.content,
+                                                      cache_control: { type: "ephemeral" },
+                                                  },
+                                              ]
+                                            : message.content.map((content, contentIndex) =>
+                                                  contentIndex === message.content.length - 1
+                                                      ? { ...content, cache_control: { type: "ephemeral" } }
+                                                      : content
+                                              ),
+                                }
+                            }
+                            return message
+                        }),
+                        tools: tools.map((tool, index) =>
+                            index === tools.length - 1 ? { ...tool, cache_control: { type: "ephemeral" } } : tool
+                        ),
+                        tool_choice: { type: "auto" },
+                    },
+                    (() => {
+                        // 8192 tokens: https://x.com/alexalbert__/status/1812921642143900036
+                        // prompt caching: https://x.com/alexalbert__/status/1823751995901272068
+                        // https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
+                        // https://github.com/anthropics/anthropic-sdk-typescript/commit/c920b77fc67bd839bfeb6716ceab9d7c9bbe7393
+                        switch (modelId) {
+                            case "claude-3-5-sonnet-20240620":
+                                return {
+                                    headers: {
+                                        "anthropic-beta": "prompt-caching-2024-07-31,max-tokens-3-5-sonnet-2024-07-15",
+                                    },
+                                }
+                            case "claude-3-haiku-20240307":
+                                return {
+                                    headers: { "anthropic-beta": "prompt-caching-2024-07-31" },
+                                }
+                            default:
+                                return undefined
+                        }
+                    })()
+                )
+            default:
+                return await this.client.messages.create({
+                    model: modelId,
+                    max_tokens: this.getModel().info.maxTokens,
+                    system: [{ text: systemPrompt, type: "text" }],
+                    messages,
+                    tools,
+                    tool_choice: { type: "auto" },
+                })
+        }
     }

     createUserReadableRequest(
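To make the breakpoint placement concrete, here is an illustrative sketch (not part of the commit) of how the two ephemeral markers roll forward over a hypothetical five-message conversation:

// Hypothetical conversation: user / assistant alternating.
type Role = "user" | "assistant"
const roles: Role[] = ["user", "assistant", "user", "assistant", "user"]

// Same reduce as in AnthropicHandler.createMessage above.
const userMsgIndices = roles.reduce(
    (acc, role, index) => (role === "user" ? [...acc, index] : acc),
    [] as number[]
) // => [0, 2, 4]

// Index 4 gets cache_control so this request's prefix is cached for the
// next request; index 2 marks where the previously cached prefix ended,
// so the server knows how much of this request it can read from cache.
const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1 // 4
const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1 // 2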


@@ -18,8 +18,11 @@ export type ApiConfiguration = ApiHandlerOptions & {

 export interface ModelInfo {
     maxTokens: number
     supportsImages: boolean
+    supportsPromptCache: boolean
     inputPrice: number
     outputPrice: number
+    cacheWrites?: number
+    cacheReads?: number
 }

 export type ApiModelId = AnthropicModelId | OpenRouterModelId | BedrockModelId
@@ -32,26 +35,36 @@ export const anthropicModels = {
     "claude-3-5-sonnet-20240620": {
         maxTokens: 8192,
         supportsImages: true,
+        supportsPromptCache: true,
         inputPrice: 3.0, // $3 per million input tokens
         outputPrice: 15.0, // $15 per million output tokens
+        cacheWrites: 3.75, // $3.75 per million tokens
+        cacheReads: 0.3, // $0.30 per million tokens
     },
     "claude-3-opus-20240229": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 15.0,
         outputPrice: 75.0,
+        cacheWrites: 18.75,
+        cacheReads: 1.5,
     },
     "claude-3-sonnet-20240229": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 3.0,
         outputPrice: 15.0,
     },
     "claude-3-haiku-20240307": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: true,
         inputPrice: 0.25,
         outputPrice: 1.25,
+        cacheWrites: 0.3,
+        cacheReads: 0.03,
     },
 } as const satisfies Record<string, ModelInfo> // as const assertion makes the object deeply readonly
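Note the pattern in these prices: cache writes run roughly 25% above the base input price and cache reads roughly 90% below it, so a cached prefix pays for itself as soon as it is read back once. A back-of-the-envelope sketch (the multipliers are assumptions derived from the prices above, not values from the commit):

// Cost of sending the same prefix across n requests, in units of its
// one-shot base input price. Assumed multipliers: writes 1.25x, reads 0.1x.
const uncached = (n: number) => n * 1.0
const cached = (n: number) => 1.25 + (n - 1) * 0.1

// cached(n) < uncached(n) once n > (1.25 - 0.1) / (1 - 0.1) ≈ 1.28,
// i.e. caching is already cheaper by the second request:
console.log(uncached(2), cached(2)) // 2.0 vs 1.35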
@@ -63,24 +76,28 @@ export const bedrockModels = {
     "anthropic.claude-3-5-sonnet-20240620-v1:0": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 3.0,
         outputPrice: 15.0,
     },
     "anthropic.claude-3-opus-20240229-v1:0": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 15.0,
         outputPrice: 75.0,
     },
     "anthropic.claude-3-sonnet-20240229-v1:0": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 3.0,
         outputPrice: 15.0,
     },
     "anthropic.claude-3-haiku-20240307-v1:0": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 0.25,
         outputPrice: 1.25,
     },
@@ -94,42 +111,49 @@ export const openRouterModels = {
     "anthropic/claude-3.5-sonnet:beta": {
         maxTokens: 8192,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 3.0,
         outputPrice: 15.0,
     },
     "anthropic/claude-3-opus:beta": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 15,
         outputPrice: 75,
     },
     "anthropic/claude-3-sonnet:beta": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 3,
         outputPrice: 15,
     },
     "anthropic/claude-3-haiku:beta": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 0.25,
         outputPrice: 1.25,
     },
     "openai/gpt-4o-2024-08-06": {
         maxTokens: 16384,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 2.5,
         outputPrice: 10,
     },
     "openai/gpt-4o-mini-2024-07-18": {
         maxTokens: 16384,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 0.15,
         outputPrice: 0.6,
     },
     "openai/gpt-4-turbo": {
         maxTokens: 4096,
         supportsImages: true,
+        supportsPromptCache: false,
         inputPrice: 10,
         outputPrice: 30,
     },
@@ -175,6 +199,7 @@ export const openRouterModels = {
     "deepseek/deepseek-coder": {
         maxTokens: 4096,
         supportsImages: false,
+        supportsPromptCache: false,
         inputPrice: 0.14,
         outputPrice: 0.28,
     },
@@ -182,6 +207,7 @@ export const openRouterModels = {
     "mistralai/mistral-large": {
         maxTokens: 8192,
         supportsImages: false,
+        supportsPromptCache: false,
         inputPrice: 3,
         outputPrice: 9,
     },


@@ -105,6 +105,7 @@ const App: React.FC = () => {
                     vscodeThemeName={vscodeThemeName}
                     showAnnouncement={showAnnouncement}
                     selectedModelSupportsImages={selectedModelInfo.supportsImages}
+                    selectedModelSupportsPromptCache={selectedModelInfo.supportsPromptCache}
                     hideAnnouncement={() => setShowAnnouncement(false)}
                 />
             </>


@@ -217,25 +217,17 @@ const ModelInfoView = ({ modelInfo }: { modelInfo: ModelInfo }) => {
     return (
         <p style={{ fontSize: "12px", marginTop: "2px", color: "var(--vscode-descriptionForeground)" }}>
-            <span
-                style={{
-                    fontWeight: 500,
-                    color: modelInfo.supportsImages
-                        ? "var(--vscode-testing-iconPassed)"
-                        : "var(--vscode-errorForeground)",
-                }}>
-                <i
-                    className={`codicon codicon-${modelInfo.supportsImages ? "check" : "x"}`}
-                    style={{
-                        marginRight: 4,
-                        marginBottom: modelInfo.supportsImages ? 1 : -1,
-                        fontSize: modelInfo.supportsImages ? 11 : 13,
-                        fontWeight: 700,
-                        display: "inline-block",
-                        verticalAlign: "bottom",
-                    }}></i>
-                {modelInfo.supportsImages ? "Supports images" : "Does not support images"}
-            </span>
+            <ModelInfoSupportsItem
+                isSupported={modelInfo.supportsPromptCache}
+                supportsLabel="Supports prompt cache"
+                doesNotSupportLabel="Does not support prompt cache"
+            />
+            <br />
+            <ModelInfoSupportsItem
+                isSupported={modelInfo.supportsImages}
+                supportsLabel="Supports images"
+                doesNotSupportLabel="Does not support images"
+            />
             <br />
             <span style={{ fontWeight: 500 }}>Max output:</span> {modelInfo.maxTokens.toLocaleString()} tokens
             <br />
@@ -247,6 +239,34 @@ const ModelInfoView = ({ modelInfo }: { modelInfo: ModelInfo }) => {
     )
 }

+const ModelInfoSupportsItem = ({
+    isSupported,
+    supportsLabel,
+    doesNotSupportLabel,
+}: {
+    isSupported: boolean
+    supportsLabel: string
+    doesNotSupportLabel: string
+}) => (
+    <span
+        style={{
+            fontWeight: 500,
+            color: isSupported ? "var(--vscode-testing-iconPassed)" : "var(--vscode-errorForeground)",
+        }}>
+        <i
+            className={`codicon codicon-${isSupported ? "check" : "x"}`}
+            style={{
+                marginRight: 4,
+                marginBottom: isSupported ? 1 : -1,
+                fontSize: isSupported ? 11 : 13,
+                fontWeight: 700,
+                display: "inline-block",
+                verticalAlign: "bottom",
+            }}></i>
+        {isSupported ? supportsLabel : doesNotSupportLabel}
+    </span>
+)

 export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
     const provider = apiConfiguration?.apiProvider || "anthropic"
     const modelId = apiConfiguration?.apiModelId


@@ -22,6 +22,7 @@ interface ChatViewProps {
     vscodeThemeName?: string
     showAnnouncement: boolean
     selectedModelSupportsImages: boolean
+    selectedModelSupportsPromptCache: boolean
     hideAnnouncement: () => void
 }
@@ -34,6 +35,7 @@ const ChatView = ({
     vscodeThemeName,
     showAnnouncement,
     selectedModelSupportsImages,
+    selectedModelSupportsPromptCache,
     hideAnnouncement,
 }: ChatViewProps) => {
     //const task = messages.length > 0 ? (messages[0].say === "task" ? messages[0] : undefined) : undefined
@@ -448,6 +450,9 @@ const ChatView = ({
                     task={task}
                     tokensIn={apiMetrics.totalTokensIn}
                     tokensOut={apiMetrics.totalTokensOut}
+                    doesModelSupportPromptCache={selectedModelSupportsPromptCache}
+                    cacheWrites={apiMetrics.totalCacheWrites}
+                    cacheReads={apiMetrics.totalCacheReads}
                     totalCost={apiMetrics.totalCost}
                     onClose={handleTaskCloseButtonClick}
                     isHidden={isHidden}


@@ -9,12 +9,25 @@ interface TaskHeaderProps {
     task: ClaudeMessage
     tokensIn: number
     tokensOut: number
+    doesModelSupportPromptCache: boolean
+    cacheWrites?: number
+    cacheReads?: number
     totalCost: number
     onClose: () => void
     isHidden: boolean
 }

-const TaskHeader: React.FC<TaskHeaderProps> = ({ task, tokensIn, tokensOut, totalCost, onClose, isHidden }) => {
+const TaskHeader: React.FC<TaskHeaderProps> = ({
+    task,
+    tokensIn,
+    tokensOut,
+    doesModelSupportPromptCache,
+    cacheWrites,
+    cacheReads,
+    totalCost,
+    onClose,
+    isHidden,
+}) => {
     const [isExpanded, setIsExpanded] = useState(false)
     const [showSeeMore, setShowSeeMore] = useState(false)
     const textContainerRef = useRef<HTMLDivElement>(null)
@@ -194,6 +207,25 @@ const TaskHeader: React.FC<TaskHeaderProps> = ({ task, tokensIn, tokensOut, tota
                             {tokensOut.toLocaleString()}
                         </span>
                     </div>
+                    {(doesModelSupportPromptCache || cacheReads !== undefined || cacheWrites !== undefined) && (
+                        <div style={{ display: "flex", alignItems: "center", gap: "4px", flexWrap: "wrap" }}>
+                            <span style={{ fontWeight: "bold" }}>Prompt Cache:</span>
+                            <span style={{ display: "flex", alignItems: "center", gap: "3px" }}>
+                                <i
+                                    className="codicon codicon-database"
+                                    style={{ fontSize: "12px", fontWeight: "bold", marginBottom: "-1px" }}
+                                />
+                                +{(cacheWrites || 0).toLocaleString()}
+                            </span>
+                            <span style={{ display: "flex", alignItems: "center", gap: "3px" }}>
+                                <i
+                                    className="codicon codicon-arrow-right"
+                                    style={{ fontSize: "12px", fontWeight: "bold", marginBottom: 0 }}
+                                />
+                                {(cacheReads || 0).toLocaleString()}
+                            </span>
+                        </div>
+                    )}
                     <div
                         style={{
                             display: "flex",


@@ -3,6 +3,8 @@ import { ClaudeMessage } from "../../../src/shared/ExtensionMessage"

 interface ApiMetrics {
     totalTokensIn: number
     totalTokensOut: number
+    totalCacheWrites?: number
+    totalCacheReads?: number
     totalCost: number
 }
@@ -11,10 +13,10 @@
  *
  * This function processes 'api_req_started' messages that have been combined with their
  * corresponding 'api_req_finished' messages by the combineApiRequests function.
- * It extracts and sums up the tokensIn, tokensOut, and cost from these messages.
+ * It extracts and sums up the tokensIn, tokensOut, cacheWrites, cacheReads, and cost from these messages.
  *
  * @param messages - An array of ClaudeMessage objects to process.
- * @returns An ApiMetrics object containing totalTokensIn, totalTokensOut, and totalCost.
+ * @returns An ApiMetrics object containing totalTokensIn, totalTokensOut, totalCacheWrites, totalCacheReads, and totalCost.
  *
  * @example
  * const messages = [
@@ -27,6 +29,8 @@ export function getApiMetrics(messages: ClaudeMessage[]): ApiMetrics {
     const result: ApiMetrics = {
         totalTokensIn: 0,
         totalTokensOut: 0,
+        totalCacheWrites: undefined,
+        totalCacheReads: undefined,
         totalCost: 0,
     }
@@ -34,7 +38,7 @@ export function getApiMetrics(messages: ClaudeMessage[]): ApiMetrics {
         if (message.type === "say" && message.say === "api_req_started" && message.text) {
             try {
                 const parsedData = JSON.parse(message.text)
-                const { tokensIn, tokensOut, cost } = parsedData
+                const { tokensIn, tokensOut, cacheWrites, cacheReads, cost } = parsedData

                 if (typeof tokensIn === "number") {
                     result.totalTokensIn += tokensIn
@@ -42,6 +46,12 @@ export function getApiMetrics(messages: ClaudeMessage[]): ApiMetrics {
                 if (typeof tokensOut === "number") {
                     result.totalTokensOut += tokensOut
                 }
+                if (typeof cacheWrites === "number") {
+                    result.totalCacheWrites = (result.totalCacheWrites ?? 0) + cacheWrites
+                }
+                if (typeof cacheReads === "number") {
+                    result.totalCacheReads = (result.totalCacheReads ?? 0) + cacheReads
+                }
                 if (typeof cost === "number") {
                     result.totalCost += cost
                 }
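A hypothetical usage sketch of the extended aggregation, with message objects abbreviated to the fields getApiMetrics actually reads (other ClaudeMessage fields are omitted for brevity, hence the cast; the costs reuse the Sonnet example above):

// Two combined API requests; only the first wrote to the cache and only
// the second read from it.
const sampleMessages = [
    {
        type: "say",
        say: "api_req_started",
        text: JSON.stringify({ tokensIn: 1000, tokensOut: 500, cacheWrites: 10000, cost: 0.048 }),
    },
    {
        type: "say",
        say: "api_req_started",
        text: JSON.stringify({ tokensIn: 1000, tokensOut: 500, cacheReads: 10000, cost: 0.0135 }),
    },
] as ClaudeMessage[]

console.log(getApiMetrics(sampleMessages))
// => { totalTokensIn: 2000, totalTokensOut: 1000,
//      totalCacheWrites: 10000, totalCacheReads: 10000, totalCost: 0.0615 }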