import {
	VSCodeCheckbox,
	VSCodeDropdown,
	VSCodeLink,
	VSCodeOption,
	VSCodeRadio,
	VSCodeRadioGroup,
	VSCodeTextField,
} from "@vscode/webview-ui-toolkit/react"
import { Fragment, memo, useCallback, useEffect, useMemo, useState } from "react"
import { useEvent, useInterval } from "react-use"
import {
	ApiConfiguration,
	ModelInfo,
	anthropicDefaultModelId,
	anthropicModels,
	azureOpenAiDefaultApiVersion,
	bedrockDefaultModelId,
	bedrockModels,
	geminiDefaultModelId,
	geminiModels,
	openAiModelInfoSaneDefaults,
	openAiNativeDefaultModelId,
	openAiNativeModels,
	openRouterDefaultModelId,
	openRouterDefaultModelInfo,
	vertexDefaultModelId,
	vertexModels,
} from "../../../../src/shared/api"
import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
import { useExtensionState } from "../../context/ExtensionStateContext"
import { vscode } from "../../utils/vscode"
import VSCodeButtonLink from "../common/VSCodeButtonLink"
import OpenRouterModelPicker, {
	ModelDescriptionMarkdown,
	OPENROUTER_MODEL_PICKER_Z_INDEX,
} from "./OpenRouterModelPicker"
import OpenAiModelPicker from "./OpenAiModelPicker"

interface ApiOptionsProps {
	showModelOptions: boolean
	apiErrorMessage?: string
	modelIdErrorMessage?: string
}

const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) => {
	const { apiConfiguration, setApiConfiguration, uriScheme } = useExtensionState()
	const [ollamaModels, setOllamaModels] = useState<string[]>([])
	const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
	const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
	const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
	const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)

	const handleInputChange = (field: keyof ApiConfiguration) => (event: any) => {
		setApiConfiguration({ ...apiConfiguration, [field]: event.target.value })
	}

	const { selectedProvider, selectedModelId, selectedModelInfo } = useMemo(() => {
		return normalizeApiConfiguration(apiConfiguration)
	}, [apiConfiguration])

	// Poll ollama/lmstudio models
	const requestLocalModels = useCallback(() => {
		if (selectedProvider === "ollama") {
			vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
		} else if (selectedProvider === "lmstudio") {
			vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
		}
	}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
	useEffect(() => {
		if (selectedProvider === "ollama" || selectedProvider === "lmstudio") {
			requestLocalModels()
		}
	}, [selectedProvider, requestLocalModels])
	useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null)

	const handleMessage = useCallback((event: MessageEvent) => {
		const message: ExtensionMessage = event.data
		if (message.type === "ollamaModels" && message.ollamaModels) {
			setOllamaModels(message.ollamaModels)
		} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
			setLmStudioModels(message.lmStudioModels)
		}
	}, [])
	useEvent("message", handleMessage)

	/*
	VSCodeDropdown has an open bug where dynamically rendered options don't auto-select the provided value prop.
	You can see this for yourself by comparing it with normal select/option elements, which work as expected.
	https://github.com/microsoft/vscode-webview-ui-toolkit/issues/433

	In our case, when the user switches between providers, we recalculate the selectedModelId depending on the
	provider, the default model for that provider, and a modelId that the user may have selected. Unfortunately,
	the VSCodeDropdown component wouldn't select this calculated value, and would default to the first
	"Select a model..." option instead, which makes it seem like the model was cleared out when it wasn't.

	As a workaround, we create separate instances of the dropdown for each provider, and then conditionally
	render the one that matches the current provider.
	*/
	const createDropdown = (models: Record<string, ModelInfo>) => {
		return (
			<VSCodeDropdown
				id="model-id"
				value={selectedModelId}
				onChange={handleInputChange("apiModelId")}
				style={{ width: "100%" }}>
				<VSCodeOption value="">Select a model...</VSCodeOption>
				{Object.keys(models).map((modelId) => (
					<VSCodeOption
						key={modelId}
						value={modelId}
						style={{
							whiteSpace: "normal",
							wordWrap: "break-word",
							maxWidth: "100%",
						}}>
						{modelId}
					</VSCodeOption>
				))}
			</VSCodeDropdown>
		)
	}

	return (
		<div style={{ display: "flex", flexDirection: "column", gap: 5 }}>
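			{/* The provider dropdown below drives which conditional section is rendered. Per the VSCodeDropdown
			workaround described above, each provider section renders its own dropdown instance via createDropdown
			rather than sharing one dropdown whose options change dynamically. */}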
			<div className="dropdown-container">
				<label htmlFor="api-provider">
					<span style={{ fontWeight: 500 }}>API Provider</span>
				</label>
				<VSCodeDropdown
					id="api-provider"
					value={selectedProvider}
					onChange={handleInputChange("apiProvider")}
					style={{ minWidth: 130, position: "relative", zIndex: OPENROUTER_MODEL_PICKER_Z_INDEX + 1 }}>
					<VSCodeOption value="openrouter">OpenRouter</VSCodeOption>
					<VSCodeOption value="anthropic">Anthropic</VSCodeOption>
					<VSCodeOption value="gemini">Google Gemini</VSCodeOption>
					<VSCodeOption value="vertex">GCP Vertex AI</VSCodeOption>
					<VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption>
					<VSCodeOption value="openai-native">OpenAI</VSCodeOption>
					<VSCodeOption value="openai">OpenAI Compatible</VSCodeOption>
					<VSCodeOption value="lmstudio">LM Studio</VSCodeOption>
					<VSCodeOption value="ollama">Ollama</VSCodeOption>
				</VSCodeDropdown>
			</div>
{selectedProvider === "anthropic" && (
Anthropic API Key { const isChecked = e.target.checked === true setAnthropicBaseUrlSelected(isChecked) if (!isChecked) { setApiConfiguration({ ...apiConfiguration, anthropicBaseUrl: "" }) } }}> Use custom base URL {anthropicBaseUrlSelected && ( )}

This key is stored locally and only used to make API requests from this extension. {!apiConfiguration?.apiKey && ( You can get an Anthropic API key by signing up here. )}

)} {selectedProvider === "openai-native" && (
OpenAI API Key

This key is stored locally and only used to make API requests from this extension. {!apiConfiguration?.openAiNativeApiKey && ( You can get an OpenAI API key by signing up here. )}

			{selectedProvider === "openrouter" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.openRouterApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("openRouterApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>OpenRouter API Key</span>
					</VSCodeTextField>
					{!apiConfiguration?.openRouterApiKey && (
						<VSCodeButtonLink
							href={getOpenRouterAuthUrl(uriScheme)}
							style={{ margin: "5px 0 0 0" }}
							appearance="secondary">
							Get OpenRouter API Key
						</VSCodeButtonLink>
					)}
					<p style={{ fontSize: "12px", marginTop: "5px", color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.{" "}
						{/* {!apiConfiguration?.openRouterApiKey && (
							<span style={{ color: "var(--vscode-charts-green)" }}>
								(Note: OpenRouter is recommended for high rate limits, prompt caching, and wider
								selection of models.)
							</span>
						)} */}
					</p>
					<VSCodeCheckbox
						checked={apiConfiguration?.openRouterUseMiddleOutTransform ?? false}
						onChange={(e: any) => {
							const isChecked = e.target.checked === true
							setApiConfiguration({ ...apiConfiguration, openRouterUseMiddleOutTransform: isChecked })
						}}>
						Compress prompts and message chains to the context size (OpenRouter Transforms)
					</VSCodeCheckbox>
				</div>
			)}
			{selectedProvider === "bedrock" && (
				<div style={{ display: "flex", flexDirection: "column", gap: 5 }}>
					<VSCodeTextField
						value={apiConfiguration?.awsAccessKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("awsAccessKey")}
						placeholder="Enter Access Key...">
						<span style={{ fontWeight: 500 }}>AWS Access Key</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.awsSecretKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("awsSecretKey")}
						placeholder="Enter Secret Key...">
						<span style={{ fontWeight: 500 }}>AWS Secret Key</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.awsSessionToken || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("awsSessionToken")}
						placeholder="Enter Session Token...">
						<span style={{ fontWeight: 500 }}>AWS Session Token</span>
					</VSCodeTextField>
					<div className="dropdown-container">
						<label htmlFor="aws-region-dropdown">
							<span style={{ fontWeight: 500 }}>AWS Region</span>
						</label>
						<VSCodeDropdown
							id="aws-region-dropdown"
							value={apiConfiguration?.awsRegion || ""}
							style={{ width: "100%" }}
							onChange={handleInputChange("awsRegion")}>
							<VSCodeOption value="">Select a region...</VSCodeOption>
							{/* The user will have to choose a region that supports the model they use, but this
							shouldn't be a problem since they'd have to request access for it in that region in
							the first place. */}
							<VSCodeOption value="us-east-1">us-east-1</VSCodeOption>
							<VSCodeOption value="us-east-2">us-east-2</VSCodeOption>
							{/* <VSCodeOption value="us-west-1">us-west-1</VSCodeOption> */}
							<VSCodeOption value="us-west-2">us-west-2</VSCodeOption>
							{/* <VSCodeOption value="af-south-1">af-south-1</VSCodeOption> */}
							{/* <VSCodeOption value="ap-east-1">ap-east-1</VSCodeOption> */}
							<VSCodeOption value="ap-south-1">ap-south-1</VSCodeOption>
							<VSCodeOption value="ap-northeast-1">ap-northeast-1</VSCodeOption>
							<VSCodeOption value="ap-northeast-2">ap-northeast-2</VSCodeOption>
							{/* <VSCodeOption value="ap-northeast-3">ap-northeast-3</VSCodeOption> */}
							<VSCodeOption value="ap-southeast-1">ap-southeast-1</VSCodeOption>
							<VSCodeOption value="ap-southeast-2">ap-southeast-2</VSCodeOption>
							<VSCodeOption value="ca-central-1">ca-central-1</VSCodeOption>
							<VSCodeOption value="eu-central-1">eu-central-1</VSCodeOption>
							<VSCodeOption value="eu-west-1">eu-west-1</VSCodeOption>
							<VSCodeOption value="eu-west-2">eu-west-2</VSCodeOption>
							<VSCodeOption value="eu-west-3">eu-west-3</VSCodeOption>
							{/* <VSCodeOption value="eu-north-1">eu-north-1</VSCodeOption> */}
							{/* <VSCodeOption value="me-south-1">me-south-1</VSCodeOption> */}
							<VSCodeOption value="sa-east-1">sa-east-1</VSCodeOption>
							<VSCodeOption value="us-gov-west-1">us-gov-west-1</VSCodeOption>
							{/* <VSCodeOption value="us-gov-east-1">us-gov-east-1</VSCodeOption> */}
						</VSCodeDropdown>
					</div>
					<VSCodeCheckbox
						checked={apiConfiguration?.awsUseCrossRegionInference ?? false}
						onChange={(e: any) => {
							const isChecked = e.target.checked === true
							setApiConfiguration({ ...apiConfiguration, awsUseCrossRegionInference: isChecked })
						}}>
						Use cross-region inference
					</VSCodeCheckbox>
					<p style={{ fontSize: "12px", marginTop: "5px", color: "var(--vscode-descriptionForeground)" }}>
						Authenticate by either providing the keys above or use the default AWS credential providers,
						i.e. ~/.aws/credentials or environment variables. These credentials are only used locally to
						make API requests from this extension.
					</p>
				</div>
			)}
			{apiConfiguration?.apiProvider === "vertex" && (
				<div style={{ display: "flex", flexDirection: "column", gap: 5 }}>
					<VSCodeTextField
						value={apiConfiguration?.vertexProjectId || ""}
						style={{ width: "100%" }}
						onInput={handleInputChange("vertexProjectId")}
						placeholder="Enter Project ID...">
						<span style={{ fontWeight: 500 }}>Google Cloud Project ID</span>
					</VSCodeTextField>
					<div className="dropdown-container">
						<label htmlFor="vertex-region-dropdown">
							<span style={{ fontWeight: 500 }}>Google Cloud Region</span>
						</label>
						<VSCodeDropdown
							id="vertex-region-dropdown"
							value={apiConfiguration?.vertexRegion || ""}
							style={{ width: "100%" }}
							onChange={handleInputChange("vertexRegion")}>
							<VSCodeOption value="">Select a region...</VSCodeOption>
							<VSCodeOption value="us-east5">us-east5</VSCodeOption>
							<VSCodeOption value="us-central1">us-central1</VSCodeOption>
							<VSCodeOption value="europe-west1">europe-west1</VSCodeOption>
							<VSCodeOption value="europe-west4">europe-west4</VSCodeOption>
							<VSCodeOption value="asia-southeast1">asia-southeast1</VSCodeOption>
						</VSCodeDropdown>
					</div>
					<p style={{ fontSize: "12px", marginTop: "5px", color: "var(--vscode-descriptionForeground)" }}>
						To use Google Cloud Vertex AI, you need to{" "}
						{"1) create a Google Cloud account › enable the Vertex AI API › enable the desired Claude models,"}{" "}
						{"2) install the Google Cloud CLI › configure Application Default Credentials."}
					</p>
				</div>
			)}
			{selectedProvider === "gemini" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.geminiApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("geminiApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>Gemini API Key</span>
					</VSCodeTextField>
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.
						{!apiConfiguration?.geminiApiKey && (
							<VSCodeLink href="https://ai.google.dev/" style={{ display: "inline", fontSize: "inherit" }}>
								You can get a Gemini API key by signing up here.
							</VSCodeLink>
						)}
					</p>
				</div>
			)}
			{selectedProvider === "openai" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.openAiBaseUrl || ""}
						style={{ width: "100%" }}
						type="url"
						onInput={handleInputChange("openAiBaseUrl")}
						placeholder="Enter base URL...">
						<span style={{ fontWeight: 500 }}>Base URL</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.openAiApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("openAiApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>API Key</span>
					</VSCodeTextField>
					<OpenAiModelPicker />
					<VSCodeCheckbox
						checked={apiConfiguration?.includeStreamOptions ?? true}
						onChange={(e: any) => {
							const isChecked = e.target.checked
							setApiConfiguration({ ...apiConfiguration, includeStreamOptions: isChecked })
						}}>
						Include stream options
					</VSCodeCheckbox>
					<VSCodeCheckbox
						checked={azureApiVersionSelected}
						onChange={(e: any) => {
							const isChecked = e.target.checked === true
							setAzureApiVersionSelected(isChecked)
							if (!isChecked) {
								setApiConfiguration({ ...apiConfiguration, azureApiVersion: "" })
							}
						}}>
						Set Azure API version
					</VSCodeCheckbox>
					{azureApiVersionSelected && (
						<VSCodeTextField
							value={apiConfiguration?.azureApiVersion || ""}
							style={{ width: "100%", marginTop: 3 }}
							onInput={handleInputChange("azureApiVersion")}
							placeholder={`Default: ${azureOpenAiDefaultApiVersion}`}
						/>
					)}
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						<span style={{ color: "var(--vscode-errorForeground)" }}>
							(Note: Cline uses complex prompts and works best with Claude models. Less capable models
							may not work as expected.)
						</span>
					</p>
				</div>
			)}
			{selectedProvider === "lmstudio" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.lmStudioBaseUrl || ""}
						style={{ width: "100%" }}
						type="url"
						onInput={handleInputChange("lmStudioBaseUrl")}
						placeholder="Default: http://localhost:1234">
						<span style={{ fontWeight: 500 }}>Base URL (optional)</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.lmStudioModelId || ""}
						style={{ width: "100%" }}
						onInput={handleInputChange("lmStudioModelId")}
						placeholder="Enter model ID...">
						<span style={{ fontWeight: 500 }}>Model ID</span>
					</VSCodeTextField>
					{lmStudioModels.length > 0 && (
						<VSCodeRadioGroup
							value={
								lmStudioModels.includes(apiConfiguration?.lmStudioModelId || "")
									? apiConfiguration?.lmStudioModelId
									: ""
							}
							onChange={(e) => {
								const value = (e.target as HTMLInputElement)?.value
								// need to check value first since radio group returns empty string sometimes
								if (value) {
									handleInputChange("lmStudioModelId")({
										target: { value },
									})
								}
							}}>
							{lmStudioModels.map((model) => (
								<VSCodeRadio key={model} value={model} checked={apiConfiguration?.lmStudioModelId === model}>
									{model}
								</VSCodeRadio>
							))}
						</VSCodeRadioGroup>
					)}
					<p style={{ fontSize: "12px", marginTop: "5px", color: "var(--vscode-descriptionForeground)" }}>
						LM Studio allows you to run models locally on your computer. For instructions on how to get
						started, see their{" "}
						<VSCodeLink href="https://lmstudio.ai/docs" style={{ display: "inline", fontSize: "inherit" }}>
							quickstart guide.
						</VSCodeLink>{" "}
						You will also need to start LM Studio's{" "}
						<VSCodeLink
							href="https://lmstudio.ai/docs/basics/server"
							style={{ display: "inline", fontSize: "inherit" }}>
							local server
						</VSCodeLink>{" "}
						feature to use it with this extension.{" "}
						<span style={{ color: "var(--vscode-errorForeground)" }}>
							(Note: Cline uses complex prompts and works best with Claude models. Less capable models
							may not work as expected.)
						</span>
					</p>
				</div>
			)}
			{selectedProvider === "ollama" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.ollamaBaseUrl || ""}
						style={{ width: "100%" }}
						type="url"
						onInput={handleInputChange("ollamaBaseUrl")}
						placeholder="Default: http://localhost:11434">
						<span style={{ fontWeight: 500 }}>Base URL (optional)</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.ollamaModelId || ""}
						style={{ width: "100%" }}
						onInput={handleInputChange("ollamaModelId")}
						placeholder="Enter model ID...">
						<span style={{ fontWeight: 500 }}>Model ID</span>
					</VSCodeTextField>
					{ollamaModels.length > 0 && (
						<VSCodeRadioGroup
							value={
								ollamaModels.includes(apiConfiguration?.ollamaModelId || "")
									? apiConfiguration?.ollamaModelId
									: ""
							}
							onChange={(e) => {
								const value = (e.target as HTMLInputElement)?.value
								// need to check value first since radio group returns empty string sometimes
								if (value) {
									handleInputChange("ollamaModelId")({
										target: { value },
									})
								}
							}}>
							{ollamaModels.map((model) => (
								<VSCodeRadio key={model} value={model} checked={apiConfiguration?.ollamaModelId === model}>
									{model}
								</VSCodeRadio>
							))}
						</VSCodeRadioGroup>
					)}
					<p style={{ fontSize: "12px", marginTop: "5px", color: "var(--vscode-descriptionForeground)" }}>
						Ollama allows you to run models locally on your computer. For instructions on how to get
						started, see their{" "}
						<VSCodeLink
							href="https://github.com/ollama/ollama/blob/main/README.md"
							style={{ display: "inline", fontSize: "inherit" }}>
							quickstart guide.
						</VSCodeLink>{" "}
						<span style={{ color: "var(--vscode-errorForeground)" }}>
							(Note: Cline uses complex prompts and works best with Claude models. Less capable models
							may not work as expected.)
						</span>
					</p>
				</div>
			)}
			{apiErrorMessage && (
				<p
					style={{
						margin: "-10px 0 4px 0",
						fontSize: 12,
						color: "var(--vscode-errorForeground)",
					}}>
					{apiErrorMessage}
				</p>
			)}

			{selectedProvider === "openrouter" && showModelOptions && <OpenRouterModelPicker />}

			{selectedProvider !== "openrouter" &&
				selectedProvider !== "openai" &&
				selectedProvider !== "ollama" &&
				selectedProvider !== "lmstudio" &&
				showModelOptions && (
					<>
						<div className="dropdown-container">
							<label htmlFor="model-id">
								<span style={{ fontWeight: 500 }}>Model</span>
							</label>
							{selectedProvider === "anthropic" && createDropdown(anthropicModels)}
							{selectedProvider === "bedrock" && createDropdown(bedrockModels)}
							{selectedProvider === "vertex" && createDropdown(vertexModels)}
							{selectedProvider === "gemini" && createDropdown(geminiModels)}
							{selectedProvider === "openai-native" && createDropdown(openAiNativeModels)}
						</div>

						<ModelInfoView
							selectedModelId={selectedModelId}
							modelInfo={selectedModelInfo}
							isDescriptionExpanded={isDescriptionExpanded}
							setIsDescriptionExpanded={setIsDescriptionExpanded}
						/>
					</>
				)}

			{modelIdErrorMessage && (
				<p
					style={{
						margin: "-10px 0 4px 0",
						fontSize: 12,
						color: "var(--vscode-errorForeground)",
					}}>
					{modelIdErrorMessage}
				</p>
			)}
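			{/* Note: OpenRouter and OpenAI Compatible use their own pickers (OpenRouterModelPicker /
			OpenAiModelPicker) above, and Ollama/LM Studio use free-form model IDs, so createDropdown only
			covers the providers whose model lists are statically defined in shared/api. */}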
		</div>
	)
}

export function getOpenRouterAuthUrl(uriScheme?: string) {
	return `https://openrouter.ai/auth?callback_url=${uriScheme || "vscode"}://saoudrizwan.claude-dev/openrouter`
}

export const formatPrice = (price: number) => {
	return new Intl.NumberFormat("en-US", {
		style: "currency",
		currency: "USD",
		minimumFractionDigits: 2,
		maximumFractionDigits: 2,
	}).format(price)
}

export const ModelInfoView = ({
	selectedModelId,
	modelInfo,
	isDescriptionExpanded,
	setIsDescriptionExpanded,
}: {
	selectedModelId: string
	modelInfo: ModelInfo
	isDescriptionExpanded: boolean
	setIsDescriptionExpanded: (isExpanded: boolean) => void
}) => {
	const isGemini = Object.keys(geminiModels).includes(selectedModelId)

	const infoItems = [
		modelInfo.description && (
			<ModelDescriptionMarkdown
				key="description"
				markdown={modelInfo.description}
				isExpanded={isDescriptionExpanded}
				setIsExpanded={setIsDescriptionExpanded}
			/>
		),
		<ModelInfoSupportsItem
			key="supportsImages"
			isSupported={modelInfo.supportsImages ?? false}
			supportsLabel="Supports images"
			doesNotSupportLabel="Does not support images"
		/>,
		<ModelInfoSupportsItem
			key="supportsComputerUse"
			isSupported={modelInfo.supportsComputerUse ?? false}
			supportsLabel="Supports computer use"
			doesNotSupportLabel="Does not support computer use"
		/>,
		!isGemini && (
			<ModelInfoSupportsItem
				key="supportsPromptCache"
				isSupported={modelInfo.supportsPromptCache}
				supportsLabel="Supports prompt caching"
				doesNotSupportLabel="Does not support prompt caching"
			/>
		),
		modelInfo.maxTokens !== undefined && modelInfo.maxTokens > 0 && (
			<span key="maxTokens">
				<span style={{ fontWeight: 500 }}>Max output:</span> {modelInfo.maxTokens?.toLocaleString()} tokens
			</span>
		),
		modelInfo.inputPrice !== undefined && modelInfo.inputPrice > 0 && (
			<span key="inputPrice">
				<span style={{ fontWeight: 500 }}>Input price:</span> {formatPrice(modelInfo.inputPrice)}/million tokens
			</span>
		),
		modelInfo.supportsPromptCache && modelInfo.cacheWritesPrice && (
			<span key="cacheWritesPrice">
				<span style={{ fontWeight: 500 }}>Cache writes price:</span>{" "}
				{formatPrice(modelInfo.cacheWritesPrice || 0)}/million tokens
			</span>
		),
		modelInfo.supportsPromptCache && modelInfo.cacheReadsPrice && (
			<span key="cacheReadsPrice">
				<span style={{ fontWeight: 500 }}>Cache reads price:</span>{" "}
				{formatPrice(modelInfo.cacheReadsPrice || 0)}/million tokens
			</span>
		),
		modelInfo.outputPrice !== undefined && modelInfo.outputPrice > 0 && (
			<span key="outputPrice">
				<span style={{ fontWeight: 500 }}>Output price:</span> {formatPrice(modelInfo.outputPrice)}/million tokens
			</span>
		),
		isGemini && (
			<span key="geminiInfo" style={{ fontStyle: "italic" }}>
				* Free up to {selectedModelId && selectedModelId.includes("flash") ? "15" : "2"} requests per minute.
				After that, billing depends on prompt size.{" "}
				<VSCodeLink href="https://ai.google.dev/pricing" style={{ display: "inline", fontSize: "inherit" }}>
					For more info, see pricing details.
				</VSCodeLink>
			</span>
		),
	].filter(Boolean)

	return (

		<p style={{ fontSize: "12px", marginTop: "2px", color: "var(--vscode-descriptionForeground)" }}>
			{infoItems.map((item, index) => (
				<Fragment key={index}>
					{item}
					{index < infoItems.length - 1 && <br />}
				</Fragment>
			))}
		</p>

	)
}

const ModelInfoSupportsItem = ({
	isSupported,
	supportsLabel,
	doesNotSupportLabel,
}: {
	isSupported: boolean
	supportsLabel: string
	doesNotSupportLabel: string
}) => (
	<span
		style={{
			fontWeight: 500,
			color: isSupported ? "var(--vscode-charts-green)" : "var(--vscode-errorForeground)",
		}}>
		{isSupported ? supportsLabel : doesNotSupportLabel}
	</span>
)

export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
	const provider = apiConfiguration?.apiProvider || "anthropic"
	const modelId = apiConfiguration?.apiModelId

	const getProviderData = (models: Record<string, ModelInfo>, defaultId: string) => {
		let selectedModelId: string
		let selectedModelInfo: ModelInfo
		if (modelId && modelId in models) {
			selectedModelId = modelId
			selectedModelInfo = models[modelId]
		} else {
			selectedModelId = defaultId
			selectedModelInfo = models[defaultId]
		}
		return { selectedProvider: provider, selectedModelId, selectedModelInfo }
	}

	switch (provider) {
		case "anthropic":
			return getProviderData(anthropicModels, anthropicDefaultModelId)
		case "bedrock":
			return getProviderData(bedrockModels, bedrockDefaultModelId)
		case "vertex":
			return getProviderData(vertexModels, vertexDefaultModelId)
		case "gemini":
			return getProviderData(geminiModels, geminiDefaultModelId)
		case "openai-native":
			return getProviderData(openAiNativeModels, openAiNativeDefaultModelId)
		case "openrouter":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.openRouterModelId || openRouterDefaultModelId,
				selectedModelInfo: apiConfiguration?.openRouterModelInfo || openRouterDefaultModelInfo,
			}
		case "openai":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.openAiModelId || "",
				selectedModelInfo: openAiModelInfoSaneDefaults,
			}
		case "ollama":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.ollamaModelId || "",
				selectedModelInfo: openAiModelInfoSaneDefaults,
			}
		case "lmstudio":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.lmStudioModelId || "",
				selectedModelInfo: openAiModelInfoSaneDefaults,
			}
		default:
			return getProviderData(anthropicModels, anthropicDefaultModelId)
	}
}

export default memo(ApiOptions)
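// Illustrative behavior of the exported helpers, derived directly from their definitions above
// (examples only, not part of this module's contract):
//
//   normalizeApiConfiguration({ apiProvider: "anthropic" })
//     // -> falls back to anthropicDefaultModelId and its ModelInfo, since no apiModelId is set
//
//   formatPrice(3.75)
//     // -> "$3.75" (en-US USD formatting, two fraction digits)
//
//   getOpenRouterAuthUrl("vscode-insiders")
//     // -> "https://openrouter.ai/auth?callback_url=vscode-insiders://saoudrizwan.claude-dev/openrouter"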