Add gemini support

Saoud Rizwan
2024-09-12 08:11:33 -04:00
parent caab575b14
commit fbb7620fa1
11 changed files with 384 additions and 61 deletions
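
Only the diff of the webview's ApiOptions component is shown below. The `geminiModels` and `geminiDefaultModelId` exports it pulls in from `../../../src/shared/api`, and the `geminiApiKey` field it reads off `ApiConfiguration`, are added by the other changed files. As a rough sketch — the model IDs, token limits, and prices here are illustrative assumptions, not values from this commit — those shared definitions presumably look something like:

// Sketch (assumption) of the Gemini entries in src/shared/api.ts, one of the
// 11 changed files not shown on this page. IDs and numbers are illustrative.
export const geminiModels = {
    "gemini-1.5-flash": {
        maxTokens: 8192, // shown as "Max output" by ModelInfoView
        supportsImages: true,
        supportsPromptCache: false, // ModelInfoView hides this row for Gemini models anyway
        inputPrice: 0, // zero-price rows are hidden; the free-tier note is shown instead
        outputPrice: 0,
    },
    "gemini-1.5-pro": {
        maxTokens: 8192,
        supportsImages: true,
        supportsPromptCache: false,
        inputPrice: 0,
        outputPrice: 0,
    },
} as const satisfies Record<string, ModelInfo>

export type GeminiModelId = keyof typeof geminiModels
export const geminiDefaultModelId: GeminiModelId = "gemini-1.5-flash"

// ApiConfiguration would likewise gain an optional secret field, e.g.:
// geminiApiKey?: string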


@@ -1,13 +1,14 @@
 import {
+    VSCodeCheckbox,
     VSCodeDropdown,
     VSCodeLink,
     VSCodeOption,
     VSCodeRadio,
     VSCodeRadioGroup,
     VSCodeTextField,
-    VSCodeCheckbox,
 } from "@vscode/webview-ui-toolkit/react"
 import { memo, useCallback, useEffect, useMemo, useState } from "react"
+import { useEvent, useInterval } from "react-use"
 import {
     ApiConfiguration,
     ModelInfo,
@@ -15,17 +16,18 @@ import {
     anthropicModels,
     bedrockDefaultModelId,
     bedrockModels,
+    geminiDefaultModelId,
+    geminiModels,
     openAiModelInfoSaneDefaults,
     openRouterDefaultModelId,
     openRouterModels,
     vertexDefaultModelId,
     vertexModels,
 } from "../../../src/shared/api"
-import { useExtensionState } from "../context/ExtensionStateContext"
-import VSCodeButtonLink from "./VSCodeButtonLink"
 import { ExtensionMessage } from "../../../src/shared/ExtensionMessage"
-import { useEvent, useInterval } from "react-use"
+import { useExtensionState } from "../context/ExtensionStateContext"
 import { vscode } from "../utils/vscode"
+import VSCodeButtonLink from "./VSCodeButtonLink"

 interface ApiOptionsProps {
     showModelOptions: boolean
@@ -113,6 +115,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage }: ApiOptionsProps) => {
                 <VSCodeOption value="openrouter">OpenRouter</VSCodeOption>
                 <VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption>
                 <VSCodeOption value="vertex">GCP Vertex AI</VSCodeOption>
+                <VSCodeOption value="gemini">Google Gemini</VSCodeOption>
                 <VSCodeOption value="openai">OpenAI Compatible</VSCodeOption>
                 <VSCodeOption value="ollama">Ollama</VSCodeOption>
             </VSCodeDropdown>
@@ -161,7 +164,9 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage }: ApiOptionsProps) => {
                 }}>
                 This key is stored locally and only used to make API requests from this extension.
                 {!apiConfiguration?.apiKey && (
-                    <VSCodeLink href="https://console.anthropic.com/" style={{ display: "inline" }}>
+                    <VSCodeLink
+                        href="https://console.anthropic.com/"
+                        style={{ display: "inline", fontSize: "inherit" }}>
                         You can get an Anthropic API key by signing up here.
                     </VSCodeLink>
                 )}
@@ -311,20 +316,48 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage }: ApiOptionsProps) => {
                 To use Google Cloud Vertex AI, you need to
                 <VSCodeLink
                     href="https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#before_you_begin"
-                    style={{ display: "inline" }}>
+                    style={{ display: "inline", fontSize: "inherit" }}>
                     {
                         "1) create a Google Cloud account › enable the Vertex AI API › enable the desired Claude models,"
                     }
                 </VSCodeLink>{" "}
                 <VSCodeLink
                     href="https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp"
-                    style={{ display: "inline" }}>
+                    style={{ display: "inline", fontSize: "inherit" }}>
                     {"2) install the Google Cloud CLI › configure Application Default Credentials."}
                 </VSCodeLink>
             </p>
         </div>
     )}
+    {selectedProvider === "gemini" && (
+        <div>
+            <VSCodeTextField
+                value={apiConfiguration?.geminiApiKey || ""}
+                style={{ width: "100%" }}
+                type="password"
+                onInput={handleInputChange("geminiApiKey")}
+                placeholder="Enter API Key...">
+                <span style={{ fontWeight: 500 }}>Gemini API Key</span>
+            </VSCodeTextField>
+            <p
+                style={{
+                    fontSize: "12px",
+                    marginTop: 3,
+                    color: "var(--vscode-descriptionForeground)",
+                }}>
+                This key is stored locally and only used to make API requests from this extension.
+                {!apiConfiguration?.geminiApiKey && (
+                    <VSCodeLink
+                        href="https://ai.google.dev/"
+                        style={{ display: "inline", fontSize: "inherit" }}>
+                        You can get a Gemini API key by signing up here.
+                    </VSCodeLink>
+                )}
+            </p>
+        </div>
+    )}
     {selectedProvider === "openai" && (
         <div>
             <VSCodeTextField
@@ -418,11 +451,13 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage }: ApiOptionsProps) => {
                 started, see their
                 <VSCodeLink
                     href="https://github.com/ollama/ollama/blob/main/README.md"
-                    style={{ display: "inline" }}>
+                    style={{ display: "inline", fontSize: "inherit" }}>
                     quickstart guide.
                 </VSCodeLink>{" "}
                 You can use any model that supports{" "}
-                <VSCodeLink href="https://ollama.com/search?c=tools" style={{ display: "inline" }}>
+                <VSCodeLink
+                    href="https://ollama.com/search?c=tools"
+                    style={{ display: "inline", fontSize: "inherit" }}>
                     tool use.
                 </VSCodeLink>
                 <span style={{ color: "var(--vscode-errorForeground)" }}>
@@ -454,9 +489,10 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage }: ApiOptionsProps) => {
             {selectedProvider === "openrouter" && createDropdown(openRouterModels)}
             {selectedProvider === "bedrock" && createDropdown(bedrockModels)}
             {selectedProvider === "vertex" && createDropdown(vertexModels)}
+            {selectedProvider === "gemini" && createDropdown(geminiModels)}
         </div>

-        <ModelInfoView modelInfo={selectedModelInfo} />
+        <ModelInfoView selectedModelId={selectedModelId} modelInfo={selectedModelInfo} />
     </>
 )}
 </div>
@@ -476,7 +512,8 @@ export const formatPrice = (price: number) => {
     }).format(price)
 }

-const ModelInfoView = ({ modelInfo }: { modelInfo: ModelInfo }) => {
+const ModelInfoView = ({ selectedModelId, modelInfo }: { selectedModelId: string; modelInfo: ModelInfo }) => {
+    const isGemini = Object.keys(geminiModels).includes(selectedModelId)
     return (
         <p style={{ fontSize: "12px", marginTop: "2px", color: "var(--vscode-descriptionForeground)" }}>
             <ModelInfoSupportsItem
@@ -485,27 +522,57 @@ const ModelInfoView = ({ modelInfo }: { modelInfo: ModelInfo }) => {
                 doesNotSupportLabel="Does not support images"
             />
             <br />
-            <ModelInfoSupportsItem
-                isSupported={modelInfo.supportsPromptCache}
-                supportsLabel="Supports prompt caching"
-                doesNotSupportLabel="Does not support prompt caching"
-            />
-            <br />
+            {!isGemini && (
+                <>
+                    <ModelInfoSupportsItem
+                        isSupported={modelInfo.supportsPromptCache}
+                        supportsLabel="Supports prompt caching"
+                        doesNotSupportLabel="Does not support prompt caching"
+                    />
+                    <br />
+                </>
+            )}
             <span style={{ fontWeight: 500 }}>Max output:</span> {modelInfo?.maxTokens?.toLocaleString()} tokens
             <br />
-            <span style={{ fontWeight: 500 }}>Input price:</span> {formatPrice(modelInfo.inputPrice)}/million tokens
+            {modelInfo.inputPrice > 0 && (
+                <>
+                    <span style={{ fontWeight: 500 }}>Input price:</span> {formatPrice(modelInfo.inputPrice)}/million
+                    tokens
+                    <br />
+                </>
+            )}
             {modelInfo.supportsPromptCache && modelInfo.cacheWritesPrice && modelInfo.cacheReadsPrice && (
                 <>
-                    <br />
                     <span style={{ fontWeight: 500 }}>Cache writes price:</span>{" "}
                     {formatPrice(modelInfo.cacheWritesPrice || 0)}/million tokens
                     <br />
                     <span style={{ fontWeight: 500 }}>Cache reads price:</span>{" "}
                     {formatPrice(modelInfo.cacheReadsPrice || 0)}/million tokens
+                    <br />
                 </>
             )}
-            <br />
-            <span style={{ fontWeight: 500 }}>Output price:</span> {formatPrice(modelInfo.outputPrice)}/million tokens
+            {modelInfo.outputPrice > 0 && (
+                <>
+                    <span style={{ fontWeight: 500 }}>Output price:</span> {formatPrice(modelInfo.outputPrice)}/million
+                    tokens
+                </>
+            )}
+            {isGemini && (
+                <>
+                    <span
+                        style={{
+                            fontStyle: "italic",
+                        }}>
+                        * Free up to {selectedModelId === geminiDefaultModelId ? "15" : "2"} requests per minute. After
+                        that, billing depends on prompt size.{" "}
+                        <VSCodeLink
+                            href="https://ai.google.dev/pricing"
+                            style={{ display: "inline", fontSize: "inherit" }}>
+                            For more info, see pricing details.
+                        </VSCodeLink>
+                    </span>
+                </>
+            )}
         </p>
     )
 }
@@ -563,6 +630,8 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
             return getProviderData(bedrockModels, bedrockDefaultModelId)
         case "vertex":
             return getProviderData(vertexModels, vertexDefaultModelId)
+        case "gemini":
+            return getProviderData(geminiModels, geminiDefaultModelId)
         case "openai":
             return {
                 selectedProvider: provider,
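
The hunks above cover only the settings UI; the rest of the commit presumably registers a "gemini" case in the extension host's provider factory and streams completions back to the webview. A minimal sketch of such a handler on top of Google's @google/generative-ai SDK — the GeminiHandler name, its method shape, and the fallback model ID are assumptions, not code from this commit:

// Hypothetical provider handler; the commit's actual handler file is not shown here.
import { GoogleGenerativeAI } from "@google/generative-ai"

export class GeminiHandler {
    private client: GoogleGenerativeAI

    constructor(private options: { geminiApiKey: string; apiModelId?: string }) {
        // The key typed into the "Gemini API Key" field arrives here via ApiConfiguration.
        this.client = new GoogleGenerativeAI(options.geminiApiKey)
    }

    async *createMessage(systemPrompt: string, userText: string): AsyncGenerator<string> {
        const model = this.client.getGenerativeModel({
            model: this.options.apiModelId ?? "gemini-1.5-flash", // assumed fallback id
            systemInstruction: systemPrompt,
        })
        // Stream partial text so the webview can render tokens as they arrive.
        const result = await model.generateContentStream(userText)
        for await (const chunk of result.stream) {
            yield chunk.text()
        }
    }
}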