import { Checkbox, Dropdown, Pane } from "vscrui"
import type { DropdownOption } from "vscrui"
import { VSCodeLink, VSCodeRadio, VSCodeRadioGroup, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
import { Fragment, memo, useCallback, useEffect, useMemo, useState } from "react"
import { useEvent, useInterval } from "react-use"
import {
	ApiConfiguration,
	ModelInfo,
	anthropicDefaultModelId,
	anthropicModels,
	azureOpenAiDefaultApiVersion,
	bedrockDefaultModelId,
	bedrockModels,
	deepSeekDefaultModelId,
	deepSeekModels,
	geminiDefaultModelId,
	geminiModels,
	glamaDefaultModelId,
	glamaDefaultModelInfo,
	mistralDefaultModelId,
	mistralModels,
	openAiModelInfoSaneDefaults,
	openAiNativeDefaultModelId,
	openAiNativeModels,
	openRouterDefaultModelId,
	openRouterDefaultModelInfo,
	vertexDefaultModelId,
	vertexModels,
} from "../../../../src/shared/api"
import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
import { useExtensionState } from "../../context/ExtensionStateContext"
import { vscode } from "../../utils/vscode"
import * as vscodemodels from "vscode"
import VSCodeButtonLink from "../common/VSCodeButtonLink"
import OpenRouterModelPicker, {
	ModelDescriptionMarkdown,
	OPENROUTER_MODEL_PICKER_Z_INDEX,
} from "./OpenRouterModelPicker"
import OpenAiModelPicker from "./OpenAiModelPicker"
import GlamaModelPicker from "./GlamaModelPicker"

interface ApiOptionsProps {
	apiErrorMessage?: string
	modelIdErrorMessage?: string
}

const ApiOptions = ({ apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) => {
	const { apiConfiguration, uriScheme, handleInputChange } = useExtensionState()
	const [ollamaModels, setOllamaModels] = useState<string[]>([])
	const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
	const [vsCodeLmModels, setVsCodeLmModels] = useState<vscodemodels.LanguageModelChatSelector[]>([])
	const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
	const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
	const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)

	const { selectedProvider, selectedModelId, selectedModelInfo } = useMemo(() => {
		return normalizeApiConfiguration(apiConfiguration)
	}, [apiConfiguration])

	// Poll ollama/lmstudio models
	const requestLocalModels = useCallback(() => {
		if (selectedProvider === "ollama") {
			vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
		} else if (selectedProvider === "lmstudio") {
			vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
		} else if (selectedProvider === "vscode-lm") {
			vscode.postMessage({ type: "requestVsCodeLmModels" })
		}
	}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
	useEffect(() => {
		if (selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm") {
			requestLocalModels()
		}
	}, [selectedProvider, requestLocalModels])
	useInterval(
		requestLocalModels,
		selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm"
			? 2000
			: null,
	)

	const handleMessage = useCallback((event: MessageEvent) => {
		const message: ExtensionMessage = event.data
		if (message.type === "ollamaModels" && message.ollamaModels) {
			setOllamaModels(message.ollamaModels)
		} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
			setLmStudioModels(message.lmStudioModels)
		} else if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
			setVsCodeLmModels(message.vsCodeLmModels)
		}
	}, [])
	useEvent("message", handleMessage)

	const createDropdown = (models: Record<string, ModelInfo>) => {
		const options: DropdownOption[] = [
			{ value: "", label: "Select a model..." },
			...Object.keys(models).map((modelId) => ({
				value: modelId,
				label: modelId,
			})),
		]
		return (
			<Dropdown
				value={selectedModelId}
				onChange={(value: unknown) => {
					handleInputChange("apiModelId")({
						target: {
							value: (value as DropdownOption).value,
						},
					})
				}}
				style={{ width: "100%" }}
				options={options}
			/>
		)
	}

	return (
		<div style={{ display: "flex", flexDirection: "column", gap: 5 }}>
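			{/* Provider selector; the raised z-index (OPENROUTER_MODEL_PICKER_Z_INDEX + 1) keeps this
			    dropdown above the model-picker popovers rendered further down. */}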
			<Dropdown
				value={selectedProvider}
				onChange={(value: unknown) => {
					handleInputChange("apiProvider")({
						target: {
							value: (value as DropdownOption).value,
						},
					})
				}}
				style={{ minWidth: 130, position: "relative", zIndex: OPENROUTER_MODEL_PICKER_Z_INDEX + 1 }}
				options={[
					{ value: "openrouter", label: "OpenRouter" },
					{ value: "anthropic", label: "Anthropic" },
					{ value: "gemini", label: "Google Gemini" },
					{ value: "deepseek", label: "DeepSeek" },
					{ value: "openai-native", label: "OpenAI" },
					{ value: "openai", label: "OpenAI Compatible" },
					{ value: "vertex", label: "GCP Vertex AI" },
					{ value: "bedrock", label: "AWS Bedrock" },
					{ value: "glama", label: "Glama" },
					{ value: "vscode-lm", label: "VS Code LM API" },
					{ value: "mistral", label: "Mistral" },
					{ value: "lmstudio", label: "LM Studio" },
					{ value: "ollama", label: "Ollama" },
				]}
			/>
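			{/* Provider-specific settings: each conditional section below renders only when its
			    provider is selected. */}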
			{selectedProvider === "anthropic" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.apiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("apiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>Anthropic API Key</span>
					</VSCodeTextField>

					<Checkbox
						checked={anthropicBaseUrlSelected}
						onChange={(checked: boolean) => {
							setAnthropicBaseUrlSelected(checked)
							if (!checked) {
								handleInputChange("anthropicBaseUrl")({
									target: {
										value: "",
									},
								})
							}
						}}>
						Use custom base URL
					</Checkbox>

					{anthropicBaseUrlSelected && (
						<VSCodeTextField
							value={apiConfiguration?.anthropicBaseUrl || ""}
							style={{ width: "100%", marginTop: 3 }}
							type="url"
							onInput={handleInputChange("anthropicBaseUrl")}
							placeholder="Default: https://api.anthropic.com"
						/>
					)}

					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.
						{!apiConfiguration?.apiKey && (
							<VSCodeLink
								href="https://console.anthropic.com/settings/keys"
								style={{ display: "inline", fontSize: "inherit" }}>
								You can get an Anthropic API key by signing up here.
							</VSCodeLink>
						)}
					</p>
				</div>
			)}

			{selectedProvider === "glama" && (
				<div>
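					{/* Glama: manual key entry; the button below starts the sign-in flow from
					    getGlamaAuthUrl when no key is stored yet. */}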
					<VSCodeTextField
						value={apiConfiguration?.glamaApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("glamaApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>Glama API Key</span>
					</VSCodeTextField>
					{!apiConfiguration?.glamaApiKey && (
						<VSCodeButtonLink href={getGlamaAuthUrl(uriScheme)} style={{ margin: "5px 0 0 0" }}>
							Get Glama API Key
						</VSCodeButtonLink>
					)}
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.
					</p>
				</div>
			)}

			{selectedProvider === "openai-native" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.openAiNativeApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("openAiNativeApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>OpenAI API Key</span>
					</VSCodeTextField>
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.
						{!apiConfiguration?.openAiNativeApiKey && (
							<VSCodeLink
								href="https://platform.openai.com/api-keys"
								style={{ display: "inline", fontSize: "inherit" }}>
								You can get an OpenAI API key by signing up here.
							</VSCodeLink>
						)}
					</p>
				</div>
			)}

			{selectedProvider === "mistral" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.mistralApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("mistralApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>Mistral API Key</span>
					</VSCodeTextField>
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.
						{!apiConfiguration?.mistralApiKey && (
							<VSCodeLink
								href="https://console.mistral.ai/"
								style={{ display: "inline", fontSize: "inherit" }}>
								You can get a Mistral API key by signing up here.
							</VSCodeLink>
						)}
					</p>
				</div>
			)}

			{selectedProvider === "openrouter" && (
				<div>
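					{/* OpenRouter: manual key entry or the OAuth flow from getOpenRouterAuthUrl; the
					    checkbox below toggles the middle-out transform for long contexts. */}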
					<VSCodeTextField
						value={apiConfiguration?.openRouterApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("openRouterApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>OpenRouter API Key</span>
					</VSCodeTextField>
					{!apiConfiguration?.openRouterApiKey && (
						<VSCodeButtonLink href={getOpenRouterAuthUrl(uriScheme)} style={{ margin: "5px 0 0 0" }}>
							Get OpenRouter API Key
						</VSCodeButtonLink>
					)}
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.{" "}
						{/* {!apiConfiguration?.openRouterApiKey && (
							<span style={{ color: "var(--vscode-charts-green)" }}>
								(Note: OpenRouter is recommended for high rate limits, prompt caching, and wider
								selection of models.)
							</span>
						)} */}
					</p>
					<Checkbox
						checked={apiConfiguration?.openRouterUseMiddleOutTransform ?? true}
						onChange={(checked: boolean) => {
							handleInputChange("openRouterUseMiddleOutTransform")({
								target: { value: checked },
							})
						}}>
						Compress prompts and message chains to the context size (
						<a href="https://openrouter.ai/docs/transforms">OpenRouter Transforms</a>)
					</Checkbox>
				</div>
			)}

			{selectedProvider === "bedrock" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.awsAccessKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("awsAccessKey")}
						placeholder="Enter Access Key...">
						<span style={{ fontWeight: 500 }}>AWS Access Key</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.awsSecretKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("awsSecretKey")}
						placeholder="Enter Secret Key...">
						<span style={{ fontWeight: 500 }}>AWS Secret Key</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.awsSessionToken || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("awsSessionToken")}
						placeholder="Enter Session Token...">
						<span style={{ fontWeight: 500 }}>AWS Session Token</span>
					</VSCodeTextField>
					<Dropdown
						value={apiConfiguration?.awsRegion || ""}
						onChange={(value: unknown) => {
							handleInputChange("awsRegion")({
								target: {
									value: (value as DropdownOption).value,
								},
							})
						}}
						style={{ width: "100%" }}
						options={[
							{ value: "", label: "Select a region..." },
							{ value: "us-east-1", label: "us-east-1" },
							{ value: "us-east-2", label: "us-east-2" },
							{ value: "us-west-2", label: "us-west-2" },
							{ value: "ap-south-1", label: "ap-south-1" },
							{ value: "ap-northeast-1", label: "ap-northeast-1" },
							{ value: "ap-northeast-2", label: "ap-northeast-2" },
							{ value: "ap-southeast-1", label: "ap-southeast-1" },
							{ value: "ap-southeast-2", label: "ap-southeast-2" },
							{ value: "ca-central-1", label: "ca-central-1" },
							{ value: "eu-central-1", label: "eu-central-1" },
							{ value: "eu-west-1", label: "eu-west-1" },
							{ value: "eu-west-2", label: "eu-west-2" },
							{ value: "eu-west-3", label: "eu-west-3" },
							{ value: "sa-east-1", label: "sa-east-1" },
							{ value: "us-gov-west-1", label: "us-gov-west-1" },
						]}
					/>
					<Checkbox
						checked={apiConfiguration?.awsUseCrossRegionInference || false}
						onChange={(checked: boolean) => {
							handleInputChange("awsUseCrossRegionInference")({
								target: { value: checked },
							})
						}}>
						Use cross-region inference
					</Checkbox>
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						Authenticate by either providing the keys above or use the default AWS credential
						providers, i.e. ~/.aws/credentials or environment variables. These credentials are only
						used locally to make API requests from this extension.
					</p>
				</div>
			)}

			{apiConfiguration?.apiProvider === "vertex" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.vertexProjectId || ""}
						style={{ width: "100%" }}
						onInput={handleInputChange("vertexProjectId")}
						placeholder="Enter Project ID...">
						<span style={{ fontWeight: 500 }}>Google Cloud Project ID</span>
					</VSCodeTextField>
					<Dropdown
						value={apiConfiguration?.vertexRegion || ""}
						onChange={(value: unknown) => {
							handleInputChange("vertexRegion")({
								target: {
									value: (value as DropdownOption).value,
								},
							})
						}}
						style={{ width: "100%" }}
						options={[
							{ value: "", label: "Select a region..." },
							{ value: "us-east5", label: "us-east5" },
							{ value: "us-central1", label: "us-central1" },
							{ value: "europe-west1", label: "europe-west1" },
							{ value: "europe-west4", label: "europe-west4" },
							{ value: "asia-southeast1", label: "asia-southeast1" },
						]}
					/>
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						To use Google Cloud Vertex AI, you need to{" "}
						{
							"1) create a Google Cloud account › enable the Vertex AI API › enable the desired Claude models,"
						}{" "}
						{"2) install the Google Cloud CLI › configure Application Default Credentials."}
					</p>
				</div>
			)}

			{selectedProvider === "gemini" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.geminiApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("geminiApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>Gemini API Key</span>
					</VSCodeTextField>
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.
						{!apiConfiguration?.geminiApiKey && (
							<VSCodeLink
								href="https://ai.google.dev/"
								style={{ display: "inline", fontSize: "inherit" }}>
								You can get a Gemini API key by signing up here.
							</VSCodeLink>
						)}
					</p>
				</div>
			)}

			{selectedProvider === "openai" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.openAiBaseUrl || ""}
						style={{ width: "100%" }}
						type="url"
						onInput={handleInputChange("openAiBaseUrl")}
						placeholder="Enter base URL...">
						<span style={{ fontWeight: 500 }}>Base URL</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.openAiApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("openAiApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>API Key</span>
					</VSCodeTextField>
					<OpenAiModelPicker />
					<Checkbox
						checked={apiConfiguration?.openAiStreamingEnabled ?? true}
						onChange={(checked: boolean) => {
							handleInputChange("openAiStreamingEnabled")({
								target: { value: checked },
							})
						}}>
						Enable streaming
					</Checkbox>
					<Checkbox
						checked={azureApiVersionSelected}
						onChange={(checked: boolean) => {
							setAzureApiVersionSelected(checked)
							if (!checked) {
								handleInputChange("azureApiVersion")({
									target: {
										value: "",
									},
								})
							}
						}}>
						Set Azure API version
					</Checkbox>
					{azureApiVersionSelected && (
						<VSCodeTextField
							value={apiConfiguration?.azureApiVersion || ""}
							style={{ width: "100%", marginTop: 3 }}
							onInput={handleInputChange("azureApiVersion")}
							placeholder={`Default: ${azureOpenAiDefaultApiVersion}`}
						/>
					)}
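					{/* Custom model metadata for OpenAI-compatible endpoints; the Pane's refresh action
					    resets the form to openAiModelInfoSaneDefaults. */}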
					<div style={{ display: "flex", flexDirection: "column", marginTop: 10, marginBottom: 10 }}>
						<Pane
							title="Model Configuration"
							open={false}
							actions={[
								{
									iconName: "refresh",
									onClick: () =>
										handleInputChange("openAiCustomModelInfo")({
											target: { value: openAiModelInfoSaneDefaults },
										}),
								},
							]}>
							<p
								style={{
									fontSize: "12px",
									color: "var(--vscode-descriptionForeground)",
									margin: "0 0 10px 0",
								}}>
								Configure the capabilities and pricing for your custom OpenAI-compatible model
							</p>

							{/* Capabilities Section */}
							<div style={{ display: "flex", flexDirection: "column", gap: 10 }}>
								<span style={{ fontWeight: 500 }}>Model Capabilities</span>
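								{/* The borderColor callbacks below give lightweight inline validation: green for
								    a valid positive value, the error color otherwise, default border when unset. */}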
								<div>
									<VSCodeTextField
										value={
											apiConfiguration?.openAiCustomModelInfo?.maxTokens?.toString() ||
											openAiModelInfoSaneDefaults.maxTokens?.toString() ||
											""
										}
										style={{
											width: "100%",
											borderColor: (() => {
												const value = apiConfiguration?.openAiCustomModelInfo?.maxTokens
												if (!value) return "var(--vscode-input-border)"
												return value > 0
													? "var(--vscode-charts-green)"
													: "var(--vscode-errorForeground)"
											})(),
										}}
										title="Maximum number of tokens the model can generate in a single response"
										onChange={(e: any) => {
											const value = parseInt(e.target.value)
											handleInputChange("openAiCustomModelInfo")({
												target: {
													value: {
														...(apiConfiguration?.openAiCustomModelInfo ||
															openAiModelInfoSaneDefaults),
														maxTokens: isNaN(value) ? undefined : value,
													},
												},
											})
										}}
										placeholder="e.g. 4096">
										<span style={{ fontWeight: 500 }}>Max Output Tokens</span>
									</VSCodeTextField>
									<p
										style={{
											fontSize: "11px",
											color: "var(--vscode-descriptionForeground)",
											marginTop: 3,
										}}>
										Maximum number of tokens the model can generate in a response. Higher values
										allow longer outputs but may increase costs.
									</p>
								</div>

								<div>
									<VSCodeTextField
										value={
											apiConfiguration?.openAiCustomModelInfo?.contextWindow?.toString() ||
											openAiModelInfoSaneDefaults.contextWindow?.toString() ||
											""
										}
										style={{
											width: "100%",
											borderColor: (() => {
												const value = apiConfiguration?.openAiCustomModelInfo?.contextWindow
												if (!value) return "var(--vscode-input-border)"
												return value > 0
													? "var(--vscode-charts-green)"
													: "var(--vscode-errorForeground)"
											})(),
										}}
										title="Total number of tokens (input + output) the model can process in a single request"
										onChange={(e: any) => {
											const parsed = parseInt(e.target.value)
											handleInputChange("openAiCustomModelInfo")({
												target: {
													value: {
														...(apiConfiguration?.openAiCustomModelInfo ||
															openAiModelInfoSaneDefaults),
														contextWindow:
															e.target.value === ""
																? undefined
																: isNaN(parsed)
																	? openAiModelInfoSaneDefaults.contextWindow
																	: parsed,
													},
												},
											})
										}}
										placeholder="e.g. 128000">
										<span style={{ fontWeight: 500 }}>Context Window Size</span>
									</VSCodeTextField>
									<p
										style={{
											fontSize: "11px",
											color: "var(--vscode-descriptionForeground)",
											marginTop: 3,
										}}>
										Total tokens (input + output) the model can process. Larger windows allow
										processing more content but may increase memory usage.
									</p>
								</div>
							</div>
							<div style={{ display: "flex", flexDirection: "column", gap: 10 }}>
								<span style={{ fontWeight: 500 }}>Model Features</span>

								<div>
									<Checkbox
										checked={
											apiConfiguration?.openAiCustomModelInfo?.supportsImages ??
											openAiModelInfoSaneDefaults.supportsImages ??
											false
										}
										onChange={(checked: boolean) => {
											handleInputChange("openAiCustomModelInfo")({
												target: {
													value: {
														...(apiConfiguration?.openAiCustomModelInfo ||
															openAiModelInfoSaneDefaults),
														supportsImages: checked,
													},
												},
											})
										}}>
										Image Support
									</Checkbox>
									<p
										style={{
											fontSize: "11px",
											color: "var(--vscode-descriptionForeground)",
											marginTop: 3,
										}}>
										Allows the model to analyze and understand images, essential for visual code
										assistance
									</p>
								</div>

								<div>
									<Checkbox
										checked={apiConfiguration?.openAiCustomModelInfo?.supportsComputerUse ?? false}
										onChange={(checked: boolean) => {
											handleInputChange("openAiCustomModelInfo")({
												target: {
													value: {
														...(apiConfiguration?.openAiCustomModelInfo ||
															openAiModelInfoSaneDefaults),
														supportsComputerUse: checked,
													},
												},
											})
										}}>
										Computer Interaction
									</Checkbox>
									<p
										style={{
											fontSize: "11px",
											color: "var(--vscode-descriptionForeground)",
											marginTop: 3,
										}}>
										Enables the model to execute commands and modify files for automated
										assistance
									</p>
								</div>
							</div>
							{/* Pricing Section */}
							<div style={{ display: "flex", flexDirection: "column", gap: 10 }}>
								<span style={{ fontWeight: 500 }}>Model Pricing</span>
								<p
									style={{
										fontSize: "11px",
										color: "var(--vscode-descriptionForeground)",
										margin: 0,
									}}>
									Configure token-based pricing in USD per million tokens
								</p>

								<VSCodeTextField
									value={
										apiConfiguration?.openAiCustomModelInfo?.inputPrice?.toString() ??
										openAiModelInfoSaneDefaults.inputPrice?.toString() ??
										""
									}
									style={{
										width: "100%",
										borderColor: (() => {
											const value = apiConfiguration?.openAiCustomModelInfo?.inputPrice
											if (!value && value !== 0) return "var(--vscode-input-border)"
											return value >= 0
												? "var(--vscode-charts-green)"
												: "var(--vscode-errorForeground)"
										})(),
									}}
									onChange={(e: any) => {
										const parsed = parseFloat(e.target.value)
										handleInputChange("openAiCustomModelInfo")({
											target: {
												value: {
													...(apiConfiguration?.openAiCustomModelInfo ??
														openAiModelInfoSaneDefaults),
													inputPrice:
														e.target.value === ""
															? undefined
															: isNaN(parsed)
																? openAiModelInfoSaneDefaults.inputPrice
																: parsed,
												},
											},
										})
									}}
									placeholder="e.g. 0.0001">
									<span style={{ fontWeight: 500 }}>Input Price</span>
								</VSCodeTextField>

								<VSCodeTextField
									value={
										apiConfiguration?.openAiCustomModelInfo?.outputPrice?.toString() ||
										openAiModelInfoSaneDefaults.outputPrice?.toString() ||
										""
									}
									style={{
										width: "100%",
										borderColor: (() => {
											const value = apiConfiguration?.openAiCustomModelInfo?.outputPrice
											if (!value && value !== 0) return "var(--vscode-input-border)"
											return value >= 0
												? "var(--vscode-charts-green)"
												: "var(--vscode-errorForeground)"
										})(),
									}}
									onChange={(e: any) => {
										const parsed = parseFloat(e.target.value)
										handleInputChange("openAiCustomModelInfo")({
											target: {
												value: {
													...(apiConfiguration?.openAiCustomModelInfo ||
														openAiModelInfoSaneDefaults),
													outputPrice:
														e.target.value === ""
															? undefined
															: isNaN(parsed)
																? openAiModelInfoSaneDefaults.outputPrice
																: parsed,
												},
											},
										})
									}}
									placeholder="e.g. 0.0002">
									<span style={{ fontWeight: 500 }}>Output Price</span>
								</VSCodeTextField>
							</div>
							{/* end Model Info Configuration */}
						</Pane>
					</div>
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						<span style={{ color: "var(--vscode-errorForeground)" }}>
							(Note: Roo Code uses complex prompts and works best with Claude models. Less capable
							models may not work as expected.)
						</span>
					</p>
				</div>
			)}

			{selectedProvider === "lmstudio" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.lmStudioBaseUrl || ""}
						style={{ width: "100%" }}
						type="url"
						onInput={handleInputChange("lmStudioBaseUrl")}
						placeholder={"Default: http://localhost:1234"}>
						<span style={{ fontWeight: 500 }}>Base URL (optional)</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.lmStudioModelId || ""}
						style={{ width: "100%" }}
						onInput={handleInputChange("lmStudioModelId")}
						placeholder={"e.g. meta-llama-3.1-8b-instruct"}>
						<span style={{ fontWeight: 500 }}>Model ID</span>
					</VSCodeTextField>
					{lmStudioModels.length > 0 && (
						<VSCodeRadioGroup
							value={
								lmStudioModels.includes(apiConfiguration?.lmStudioModelId || "")
									? apiConfiguration?.lmStudioModelId
									: ""
							}
							onChange={(e) => {
								const value = (e.target as HTMLInputElement)?.value
								// need to check value first since radio group returns empty string sometimes
								if (value) {
									handleInputChange("lmStudioModelId")({
										target: { value },
									})
								}
							}}>
							{lmStudioModels.map((model) => (
								<VSCodeRadio
									key={model}
									value={model}
									checked={apiConfiguration?.lmStudioModelId === model}>
									{model}
								</VSCodeRadio>
							))}
						</VSCodeRadioGroup>
					)}
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						LM Studio allows you to run models locally on your computer. For instructions on how to get
						started, see their{" "}
						<VSCodeLink href="https://lmstudio.ai/docs" style={{ display: "inline", fontSize: "inherit" }}>
							quickstart guide.
						</VSCodeLink>{" "}
						You will also need to start LM Studio's{" "}
						<VSCodeLink
							href="https://lmstudio.ai/docs/basics/server"
							style={{ display: "inline", fontSize: "inherit" }}>
							local server
						</VSCodeLink>{" "}
						feature to use it with this extension.{" "}
						<span style={{ color: "var(--vscode-errorForeground)" }}>
							(Note: Roo Code uses complex prompts and works best with Claude models. Less capable
							models may not work as expected.)
						</span>
					</p>
				</div>
			)}

			{selectedProvider === "deepseek" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.deepSeekApiKey || ""}
						style={{ width: "100%" }}
						type="password"
						onInput={handleInputChange("deepSeekApiKey")}
						placeholder="Enter API Key...">
						<span style={{ fontWeight: 500 }}>DeepSeek API Key</span>
					</VSCodeTextField>
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						This key is stored locally and only used to make API requests from this extension.
						{!apiConfiguration?.deepSeekApiKey && (
							<VSCodeLink
								href="https://platform.deepseek.com/"
								style={{ display: "inline", fontSize: "inherit" }}>
								You can get a DeepSeek API key by signing up here.
							</VSCodeLink>
						)}
					</p>
				</div>
			)}

			{selectedProvider === "vscode-lm" && (
				<div>
					{vsCodeLmModels.length > 0 ? (
						<Dropdown
							value={
								apiConfiguration?.vsCodeLmModelSelector
									? `${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}`
									: ""
							}
							onChange={(value: unknown) => {
								const valueStr = (value as DropdownOption)?.value
								if (!valueStr) {
									return
								}
								const [vendor, family] = valueStr.split("/")
								handleInputChange("vsCodeLmModelSelector")({
									target: {
										value: { vendor, family },
									},
								})
							}}
							style={{ width: "100%" }}
							options={[
								{ value: "", label: "Select a model..." },
								...vsCodeLmModels.map((model) => ({
									value: `${model.vendor}/${model.family}`,
									label: `${model.vendor} - ${model.family}`,
								})),
							]}
						/>
					) : (
						<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
							The VS Code Language Model API allows you to run models provided by other VS Code
							extensions (including but not limited to GitHub Copilot). The easiest way to get started
							is to install the Copilot and Copilot Chat extensions from the VS Code Marketplace.
						</p>
					)}

					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-errorForeground)" }}>
						Note: This is a very experimental integration and may not work as expected. Please report
						any issues to the Roo-Code GitHub repository.
					</p>
				</div>
			)}

			{selectedProvider === "ollama" && (
				<div>
					<VSCodeTextField
						value={apiConfiguration?.ollamaBaseUrl || ""}
						style={{ width: "100%" }}
						type="url"
						onInput={handleInputChange("ollamaBaseUrl")}
						placeholder={"Default: http://localhost:11434"}>
						<span style={{ fontWeight: 500 }}>Base URL (optional)</span>
					</VSCodeTextField>
					<VSCodeTextField
						value={apiConfiguration?.ollamaModelId || ""}
						style={{ width: "100%" }}
						onInput={handleInputChange("ollamaModelId")}
						placeholder={"e.g. llama3.1"}>
						<span style={{ fontWeight: 500 }}>Model ID</span>
					</VSCodeTextField>
					{ollamaModels.length > 0 && (
						<VSCodeRadioGroup
							value={
								ollamaModels.includes(apiConfiguration?.ollamaModelId || "")
									? apiConfiguration?.ollamaModelId
									: ""
							}
							onChange={(e) => {
								const value = (e.target as HTMLInputElement)?.value
								// need to check value first since radio group returns empty string sometimes
								if (value) {
									handleInputChange("ollamaModelId")({
										target: { value },
									})
								}
							}}>
							{ollamaModels.map((model) => (
								<VSCodeRadio
									key={model}
									value={model}
									checked={apiConfiguration?.ollamaModelId === model}>
									{model}
								</VSCodeRadio>
							))}
						</VSCodeRadioGroup>
					)}
					<p style={{ fontSize: "12px", marginTop: 3, color: "var(--vscode-descriptionForeground)" }}>
						Ollama allows you to run models locally on your computer. For instructions on how to get
						started, see their{" "}
						<VSCodeLink
							href="https://github.com/ollama/ollama/blob/main/README.md"
							style={{ display: "inline", fontSize: "inherit" }}>
							quickstart guide.
						</VSCodeLink>{" "}
						<span style={{ color: "var(--vscode-errorForeground)" }}>
							(Note: Roo Code uses complex prompts and works best with Claude models. Less capable
							models may not work as expected.)
						</span>
					</p>
				</div>
			)}

			{apiErrorMessage && (
				<p style={{ margin: "-10px 0 4px 0", fontSize: 12, color: "var(--vscode-errorForeground)" }}>
					{apiErrorMessage}
				</p>
			)}

			{selectedProvider === "glama" && <GlamaModelPicker />}

			{selectedProvider === "openrouter" && <OpenRouterModelPicker />}

			{selectedProvider !== "glama" &&
				selectedProvider !== "openrouter" &&
				selectedProvider !== "openai" &&
				selectedProvider !== "ollama" &&
				selectedProvider !== "lmstudio" && (
					<>
						{selectedProvider === "anthropic" && createDropdown(anthropicModels)}
						{selectedProvider === "bedrock" && createDropdown(bedrockModels)}
						{selectedProvider === "vertex" && createDropdown(vertexModels)}
						{selectedProvider === "gemini" && createDropdown(geminiModels)}
						{selectedProvider === "openai-native" && createDropdown(openAiNativeModels)}
						{selectedProvider === "deepseek" && createDropdown(deepSeekModels)}
						{selectedProvider === "mistral" && createDropdown(mistralModels)}
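						{/* Read-only capability/pricing summary for the selected model, rendered by
						    ModelInfoView below. */}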
						<ModelInfoView
							selectedModelId={selectedModelId}
							modelInfo={selectedModelInfo}
							isDescriptionExpanded={isDescriptionExpanded}
							setIsDescriptionExpanded={setIsDescriptionExpanded}
						/>
					</>
				)}

			{modelIdErrorMessage && (
				<p style={{ margin: "-10px 0 4px 0", fontSize: 12, color: "var(--vscode-errorForeground)" }}>
					{modelIdErrorMessage}
				</p>
			)}
		</div>
	)
}

export function getGlamaAuthUrl(uriScheme?: string) {
	const callbackUrl = `${uriScheme || "vscode"}://rooveterinaryinc.roo-cline/glama`
	return `https://glama.ai/oauth/authorize?callback_url=${encodeURIComponent(callbackUrl)}`
}

export function getOpenRouterAuthUrl(uriScheme?: string) {
	return `https://openrouter.ai/auth?callback_url=${uriScheme || "vscode"}://rooveterinaryinc.roo-cline/openrouter`
}

export const formatPrice = (price: number) => {
	return new Intl.NumberFormat("en-US", {
		style: "currency",
		currency: "USD",
		minimumFractionDigits: 2,
		maximumFractionDigits: 2,
	}).format(price)
}

export const ModelInfoView = ({
	selectedModelId,
	modelInfo,
	isDescriptionExpanded,
	setIsDescriptionExpanded,
}: {
	selectedModelId: string
	modelInfo: ModelInfo
	isDescriptionExpanded: boolean
	setIsDescriptionExpanded: (isExpanded: boolean) => void
}) => {
	const isGemini = Object.keys(geminiModels).includes(selectedModelId)

	const infoItems = [
		modelInfo.description && (
			<ModelDescriptionMarkdown
				key="description"
				markdown={modelInfo.description}
				isExpanded={isDescriptionExpanded}
				setIsExpanded={setIsDescriptionExpanded}
			/>
		),
		<ModelInfoSupportsItem
			key="supportsImages"
			isSupported={modelInfo.supportsImages ?? false}
			supportsLabel="Supports images"
			doesNotSupportLabel="Does not support images"
		/>,
		<ModelInfoSupportsItem
			key="supportsComputerUse"
			isSupported={modelInfo.supportsComputerUse ?? false}
			supportsLabel="Supports computer use"
			doesNotSupportLabel="Does not support computer use"
		/>,
		!isGemini && (
			<ModelInfoSupportsItem
				key="supportsPromptCache"
				isSupported={modelInfo.supportsPromptCache}
				supportsLabel="Supports prompt caching"
				doesNotSupportLabel="Does not support prompt caching"
			/>
		),
		modelInfo.maxTokens !== undefined && modelInfo.maxTokens > 0 && (
			<span key="maxTokens">
				<span style={{ fontWeight: 500 }}>Max output:</span> {modelInfo.maxTokens?.toLocaleString()} tokens
			</span>
		),
		modelInfo.inputPrice !== undefined && modelInfo.inputPrice > 0 && (
			<span key="inputPrice">
				<span style={{ fontWeight: 500 }}>Input price:</span> {formatPrice(modelInfo.inputPrice)}/million
				tokens
			</span>
		),
		modelInfo.supportsPromptCache && modelInfo.cacheWritesPrice && (
			<span key="cacheWritesPrice">
				<span style={{ fontWeight: 500 }}>Cache writes price:</span>{" "}
				{formatPrice(modelInfo.cacheWritesPrice || 0)}/million tokens
			</span>
		),
		modelInfo.supportsPromptCache && modelInfo.cacheReadsPrice && (
			<span key="cacheReadsPrice">
				<span style={{ fontWeight: 500 }}>Cache reads price:</span>{" "}
				{formatPrice(modelInfo.cacheReadsPrice || 0)}/million tokens
			</span>
		),
		modelInfo.outputPrice !== undefined && modelInfo.outputPrice > 0 && (
			<span key="outputPrice">
				<span style={{ fontWeight: 500 }}>Output price:</span> {formatPrice(modelInfo.outputPrice)}/million
				tokens
			</span>
		),
		isGemini && (
			<span key="geminiInfo" style={{ fontStyle: "italic" }}>
				* Free up to {selectedModelId && selectedModelId.includes("flash") ? "15" : "2"} requests per
				minute. After that, billing depends on prompt size.{" "}
				<VSCodeLink href="https://ai.google.dev/pricing" style={{ display: "inline", fontSize: "inherit" }}>
					For more info, see pricing details.
				</VSCodeLink>
			</span>
		),
	].filter(Boolean)

	return (
		<p style={{ fontSize: "12px", marginTop: "2px", color: "var(--vscode-descriptionForeground)" }}>
			{infoItems.map((item, index) => (
				<Fragment key={index}>
					{item}
					{index < infoItems.length - 1 && <br />}
				</Fragment>
			))}
		</p>
	)
}

const ModelInfoSupportsItem = ({
	isSupported,
	supportsLabel,
	doesNotSupportLabel,
}: {
	isSupported: boolean
	supportsLabel: string
	doesNotSupportLabel: string
}) => (
	<span
		style={{
			fontWeight: 500,
			color: isSupported ? "var(--vscode-charts-green)" : "var(--vscode-errorForeground)",
		}}>
		{isSupported ? supportsLabel : doesNotSupportLabel}
	</span>
)

export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
	const provider = apiConfiguration?.apiProvider || "anthropic"
	const modelId = apiConfiguration?.apiModelId

	const getProviderData = (models: Record<string, ModelInfo>, defaultId: string) => {
		let selectedModelId: string
		let selectedModelInfo: ModelInfo
		if (modelId && modelId in models) {
			selectedModelId = modelId
			selectedModelInfo = models[modelId]
		} else {
			selectedModelId = defaultId
			selectedModelInfo = models[defaultId]
		}
		return { selectedProvider: provider, selectedModelId, selectedModelInfo }
	}

	switch (provider) {
		case "anthropic":
			return getProviderData(anthropicModels, anthropicDefaultModelId)
		case "bedrock":
			return getProviderData(bedrockModels, bedrockDefaultModelId)
		case "vertex":
			return getProviderData(vertexModels, vertexDefaultModelId)
		case "gemini":
			return getProviderData(geminiModels, geminiDefaultModelId)
		case "deepseek":
			return getProviderData(deepSeekModels, deepSeekDefaultModelId)
		case "openai-native":
			return getProviderData(openAiNativeModels, openAiNativeDefaultModelId)
		case "glama":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.glamaModelId || glamaDefaultModelId,
				selectedModelInfo: apiConfiguration?.glamaModelInfo || glamaDefaultModelInfo,
			}
		case "mistral":
			return getProviderData(mistralModels, mistralDefaultModelId)
		case "openrouter":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.openRouterModelId || openRouterDefaultModelId,
				selectedModelInfo: apiConfiguration?.openRouterModelInfo || openRouterDefaultModelInfo,
			}
		case "openai":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.openAiModelId || "",
				selectedModelInfo: apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults,
			}
		case "ollama":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.ollamaModelId || "",
				selectedModelInfo: openAiModelInfoSaneDefaults,
			}
		case "lmstudio":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.lmStudioModelId || "",
				selectedModelInfo: openAiModelInfoSaneDefaults,
			}
		case "vscode-lm":
			return {
				selectedProvider: provider,
				selectedModelId: apiConfiguration?.vsCodeLmModelSelector
					? `${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}`
					: "",
				selectedModelInfo: {
					...openAiModelInfoSaneDefaults,
					supportsImages: false, // VSCode LM API currently doesn't support images
				},
			}
		default:
			return getProviderData(anthropicModels, anthropicDefaultModelId)
	}
}

export default memo(ApiOptions)
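// Illustrative usage of normalizeApiConfiguration (assumed call site, not part of this file):
// with no model id set, the function falls back to the provider's defaults.
//
//   const { selectedModelId, selectedModelInfo } = normalizeApiConfiguration({
//       apiProvider: "openrouter",
//   })
//   // selectedModelId === openRouterDefaultModelId
//   // selectedModelInfo === openRouterDefaultModelInfo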