): boolean {
+ switch (toolName) {
+ case "execute_command":
+ return "command" in tool_input
+ case "read_file":
+ case "list_code_definition_names":
+ case "list_files":
+ return "path" in tool_input
+ case "search_files":
+ return "path" in tool_input && "regex" in tool_input
+ case "write_to_file":
+ return "path" in tool_input && "content" in tool_input
+ case "ask_followup_question":
+ return "question" in tool_input
+ case "attempt_completion":
+ return "result" in tool_input
+ default:
+ return false
+ }
+}
+
+// Example usage:
+// const aiResponse = `Here's my analysis of the situation...
+
+//
+// ls -la
+//
+
+//
+// ./example.txt
+// Hello, World!
+// `;
+//
+// const { normalText, toolCalls } = parseAIResponse(aiResponse);
+// console.log(normalText);
+// console.log(toolCalls);
+
+// Converts an OpenAI ChatCompletion into an Anthropic Messages.Message.
+// The assistant text is passed through parseAIResponse to split plain prose
+// from embedded tool calls (o1 models lack native tool use, so tool calls
+// are encoded in the text and recovered by parsing).
+export function convertO1ResponseToAnthropicMessage(
+ completion: OpenAI.Chat.Completions.ChatCompletion
+): Anthropic.Messages.Message {
+ // Only the first choice is inspected — assumes callers request n === 1; TODO confirm.
+ const openAiMessage = completion.choices[0].message
+ const { normalText, toolCalls } = parseAIResponse(openAiMessage.content || "")
+
+ const anthropicMessage: Anthropic.Messages.Message = {
+ id: completion.id,
+ type: "message",
+ role: openAiMessage.role, // always "assistant"
+ // The parsed prose becomes the first (text) content block; tool_use blocks
+ // are appended below only when tool calls were found.
+ content: [
+ {
+ type: "text",
+ text: normalText,
+ },
+ ],
+ model: completion.model,
+ // Map OpenAI finish_reason values onto Anthropic stop_reason values.
+ stop_reason: (() => {
+ switch (completion.choices[0].finish_reason) {
+ case "stop":
+ return "end_turn"
+ case "length":
+ return "max_tokens"
+ case "tool_calls":
+ return "tool_use"
+ case "content_filter": // Anthropic doesn't have an exact equivalent
+ default:
+ return null
+ }
+ })(),
+ stop_sequence: null, // which custom stop_sequence was generated, if any (not applicable if you don't use stop_sequence)
+ // Token counts fall back to 0 when the completion carries no usage data.
+ usage: {
+ input_tokens: completion.usage?.prompt_tokens || 0,
+ output_tokens: completion.usage?.completion_tokens || 0,
+ },
+ }
+
+ if (toolCalls.length > 0) {
+ anthropicMessage.content.push(
+ ...toolCalls.map((toolCall: ToolCall, index: number): Anthropic.ToolUseBlock => {
+ return {
+ type: "tool_use",
+ id: `call_${index}_${Date.now()}`, // Generate a unique ID for each tool call
+ name: toolCall.tool,
+ input: toolCall.tool_input,
+ }
+ })
+ )
+ }
+
+ return anthropicMessage
+}
+
+// Example usage:
+// const openAICompletion = {
+// id: "cmpl-123",
+// choices: [{
+// message: {
+// role: "assistant",
+// content: "Here's my analysis...\n\n\n ls -la\n"
+// },
+// finish_reason: "stop"
+// }],
+// model: "gpt-3.5-turbo",
+// usage: { prompt_tokens: 50, completion_tokens: 100 }
+// };
+// const anthropicMessage = convertO1ResponseToAnthropicMessage(openAICompletion);
+// console.log(anthropicMessage);
diff --git a/webview-ui/src/components/ApiOptions.tsx b/webview-ui/src/components/ApiOptions.tsx
index e87d48b..7a0b547 100644
--- a/webview-ui/src/components/ApiOptions.tsx
+++ b/webview-ui/src/components/ApiOptions.tsx
@@ -546,7 +546,7 @@ export const formatPrice = (price: number) => {
const ModelInfoView = ({ selectedModelId, modelInfo }: { selectedModelId: string; modelInfo: ModelInfo }) => {
const isGemini = Object.keys(geminiModels).includes(selectedModelId)
- const isO1 = false //(["o1-preview", "o1-mini"] as OpenAiNativeModelId[]).includes(selectedModelId as OpenAiNativeModelId)
+ const isO1 = selectedModelId && selectedModelId.includes("o1")
return (
- * This model is newly released and may not be accessible to all users yet.
+ * This model does not support tool use or system prompts, so Claude Dev uses structured output
+ prompting to achieve similar results. Your mileage may vary.
>
)}