Merge remote-tracking branch 'upstream/main'

John Stearns
2024-11-14 19:34:12 -08:00
17 changed files with 226 additions and 12 deletions


@@ -15,6 +15,7 @@ body:
        - AWS Bedrock
        - OpenAI
        - OpenAI Compatible
+       - LM Studio
        - Ollama
    validations:
      required: true


@@ -1,5 +1,13 @@
# Change Log

+## [2.1.6]
+- Add LM Studio as an API provider option (make sure to start the LM Studio server to use it with the extension!)
+## [2.1.5]
+- Add support for prompt caching for new Claude model IDs on OpenRouter (e.g. `anthropic/claude-3.5-sonnet-20240620`)
## [2.1.4]
- AWS Bedrock fixes (add missing regions, support for cross-region inference, and older Sonnet model for regions where new model is not available)
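Context for the [2.1.6] entry: the extension reaches LM Studio through its OpenAI-compatible local server, which is why the server has to be running first. A minimal sketch of that round trip outside the extension, using the `openai` npm package; the port is LM Studio's default and the model name is illustrative of whatever model you have loaded:

```typescript
import OpenAI from "openai"

// Point the OpenAI client at LM Studio's local server (default port 1234).
// LM Studio ignores the API key, so any placeholder value works.
const client = new OpenAI({ baseURL: "http://localhost:1234/v1", apiKey: "noop" })

async function main() {
	// "meta-llama-3.1-8b-instruct" is illustrative; use whatever model is loaded in LM Studio.
	const stream = await client.chat.completions.create({
		model: "meta-llama-3.1-8b-instruct",
		messages: [{ role: "user", content: "Say hello in one short sentence." }],
		stream: true,
	})
	for await (const chunk of stream) {
		process.stdout.write(chunk.choices[0]?.delta?.content ?? "")
	}
}

main()
```

If the local server is not running, this request fails immediately, which is the failure mode the changelog note warns about.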


@@ -78,7 +78,7 @@ Thanks to [Claude 3.5 Sonnet's agentic coding capabilities](https://www-cdn.ant
### Use any API and Model

-Cline supports API providers like OpenRouter, Anthropic, OpenAI, Google Gemini, AWS Bedrock, Azure, and GCP Vertex. You can also configure any OpenAI compatible API, or use a local model through Ollama. If you're using OpenRouter, the extension fetches their latest model list, allowing you to use the newest models as soon as they're available.
+Cline supports API providers like OpenRouter, Anthropic, OpenAI, Google Gemini, AWS Bedrock, Azure, and GCP Vertex. You can also configure any OpenAI compatible API, or use a local model through LM Studio/Ollama. If you're using OpenRouter, the extension fetches their latest model list, allowing you to use the newest models as soon as they're available.

The extension also keeps track of total tokens and API usage cost for the entire task loop and individual requests, keeping you informed of spend every step of the way.
@@ -138,7 +138,7 @@ To contribute to the project, start by exploring [open issues](https://github.co
<details>
<summary>Local Development Instructions</summary>

-1. Clone the repository:
+1. Clone the repository _(Requires [git-lfs](https://git-lfs.com/))_:
   ```bash
   git clone https://github.com/cline/cline.git
   ```

bin/roo-cline-2.0.1.vsix (new binary file; contents not shown)

package-lock.json (generated)

@@ -1,12 +1,12 @@
{
  "name": "roo-cline",
- "version": "1.0.5",
+ "version": "2.0.1",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "roo-cline",
-     "version": "1.0.5",
+     "version": "2.0.1",
      "dependencies": {
        "@anthropic-ai/bedrock-sdk": "^0.10.2",
        "@anthropic-ai/sdk": "^0.26.0",


@@ -2,7 +2,7 @@
"name": "roo-cline", "name": "roo-cline",
"displayName": "Roo Cline", "displayName": "Roo Cline",
"description": "Autonomous coding agent right in your IDE, capable of creating/editing files, running commands, using the browser, and more with your permission every step of the way.", "description": "Autonomous coding agent right in your IDE, capable of creating/editing files, running commands, using the browser, and more with your permission every step of the way.",
"version": "2.0.0", "version": "2.0.1",
"icon": "assets/icons/icon.png", "icon": "assets/icons/icon.png",
"galleryBanner": { "galleryBanner": {
"color": "#617A91", "color": "#617A91",


@@ -6,6 +6,7 @@ import { OpenRouterHandler } from "./providers/openrouter"
import { VertexHandler } from "./providers/vertex"
import { OpenAiHandler } from "./providers/openai"
import { OllamaHandler } from "./providers/ollama"
+import { LmStudioHandler } from "./providers/lmstudio"
import { GeminiHandler } from "./providers/gemini"
import { OpenAiNativeHandler } from "./providers/openai-native"
import { ApiStream } from "./transform/stream"
@@ -30,6 +31,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
			return new OpenAiHandler(options)
		case "ollama":
			return new OllamaHandler(options)
+		case "lmstudio":
+			return new LmStudioHandler(options)
		case "gemini":
			return new GeminiHandler(options)
		case "openai-native":


@@ -0,0 +1,56 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+import { ApiHandler } from "../"
+import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
+import { convertToOpenAiMessages } from "../transform/openai-format"
+import { ApiStream } from "../transform/stream"
+
+export class LmStudioHandler implements ApiHandler {
+	private options: ApiHandlerOptions
+	private client: OpenAI
+
+	constructor(options: ApiHandlerOptions) {
+		this.options = options
+		this.client = new OpenAI({
+			baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
+			apiKey: "noop",
+		})
+	}
+
+	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "system", content: systemPrompt },
+			...convertToOpenAiMessages(messages),
+		]
+
+		try {
+			const stream = await this.client.chat.completions.create({
+				model: this.getModel().id,
+				messages: openAiMessages,
+				temperature: 0,
+				stream: true,
+			})
+			for await (const chunk of stream) {
+				const delta = chunk.choices[0]?.delta
+				if (delta?.content) {
+					yield {
+						type: "text",
+						text: delta.content,
+					}
+				}
+			}
+		} catch (error) {
+			// LM Studio doesn't return an error code/body for now
+			throw new Error(
+				"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Cline's prompts."
+			)
+		}
+	}
+
+	getModel(): { id: string; info: ModelInfo } {
+		return {
+			id: this.options.lmStudioModelId || "",
+			info: openAiModelInfoSaneDefaults,
+		}
+	}
+}
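A rough usage sketch for the new handler, routed through `buildApiHandler` (which now maps the `"lmstudio"` provider to `LmStudioHandler`). The import path and option values are illustrative; inside the extension the handler is driven by the task loop rather than called directly:

```typescript
import { buildApiHandler } from "../" // i.e. src/api/index.ts, which now maps "lmstudio" to LmStudioHandler

// Hypothetical configuration; lmStudioBaseUrl falls back to http://localhost:1234 when omitted.
const handler = buildApiHandler({
	apiProvider: "lmstudio",
	lmStudioModelId: "meta-llama-3.1-8b-instruct",
})

async function run() {
	// createMessage yields { type: "text", text } chunks as LM Studio streams tokens back.
	for await (const chunk of handler.createMessage("You are a helpful assistant.", [
		{ role: "user", content: "Summarize what LM Studio is in one sentence." },
	])) {
		if (chunk.type === "text") {
			process.stdout.write(chunk.text)
		}
	}
}

run()
```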


@@ -31,9 +31,19 @@ export class OpenRouterHandler implements ApiHandler {
		]

		// prompt caching: https://openrouter.ai/docs/prompt-caching
+		// this is specifically for claude models (some models may 'support prompt caching' automatically without this)
		switch (this.getModel().id) {
+			case "anthropic/claude-3.5-sonnet":
			case "anthropic/claude-3.5-sonnet:beta":
+			case "anthropic/claude-3.5-sonnet-20240620":
+			case "anthropic/claude-3.5-sonnet-20240620:beta":
+			case "anthropic/claude-3-5-haiku":
+			case "anthropic/claude-3-5-haiku:beta":
+			case "anthropic/claude-3-5-haiku-20241022":
+			case "anthropic/claude-3-5-haiku-20241022:beta":
+			case "anthropic/claude-3-haiku":
			case "anthropic/claude-3-haiku:beta":
+			case "anthropic/claude-3-opus":
			case "anthropic/claude-3-opus:beta":
				openAiMessages[0] = {
					role: "system",
@@ -76,6 +86,12 @@ export class OpenRouterHandler implements ApiHandler {
		switch (this.getModel().id) {
			case "anthropic/claude-3.5-sonnet":
			case "anthropic/claude-3.5-sonnet:beta":
+			case "anthropic/claude-3.5-sonnet-20240620":
+			case "anthropic/claude-3.5-sonnet-20240620:beta":
+			case "anthropic/claude-3-5-haiku":
+			case "anthropic/claude-3-5-haiku:beta":
+			case "anthropic/claude-3-5-haiku-20241022":
+			case "anthropic/claude-3-5-haiku-20241022:beta":
				maxTokens = 8_192
				break
		}
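The assignment to `openAiMessages[0]` is truncated in the hunk above; per OpenRouter's prompt-caching docs for Anthropic models, it marks the system prompt as a cache breakpoint for the newly listed Claude model IDs (the second hunk simply raises `maxTokens` to 8_192 for the new Sonnet and Haiku IDs). A sketch of what that rewrite looks like, not the exact code from the file:

```typescript
// Sketch of the cache-breakpoint rewrite the hunk above applies for the listed Claude model IDs,
// following OpenRouter's prompt-caching format for Anthropic models (cache_control on a text part).
function withPromptCaching(systemPrompt: string) {
	return {
		role: "system" as const,
		content: [
			{
				type: "text" as const,
				text: systemPrompt,
				// OpenRouter/Anthropic extension; not part of the standard OpenAI chat types
				cache_control: { type: "ephemeral" as const },
			},
		],
	}
}
```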


@@ -53,6 +53,8 @@ type GlobalStateKey =
| "openAiModelId" | "openAiModelId"
| "ollamaModelId" | "ollamaModelId"
| "ollamaBaseUrl" | "ollamaBaseUrl"
| "lmStudioModelId"
| "lmStudioBaseUrl"
| "anthropicBaseUrl" | "anthropicBaseUrl"
| "azureApiVersion" | "azureApiVersion"
| "openRouterModelId" | "openRouterModelId"
@@ -363,6 +365,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
			openAiModelId,
			ollamaModelId,
			ollamaBaseUrl,
+			lmStudioModelId,
+			lmStudioBaseUrl,
			anthropicBaseUrl,
			geminiApiKey,
			openAiNativeApiKey,
@@ -386,6 +390,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
		await this.updateGlobalState("openAiModelId", openAiModelId)
		await this.updateGlobalState("ollamaModelId", ollamaModelId)
		await this.updateGlobalState("ollamaBaseUrl", ollamaBaseUrl)
+		await this.updateGlobalState("lmStudioModelId", lmStudioModelId)
+		await this.updateGlobalState("lmStudioBaseUrl", lmStudioBaseUrl)
		await this.updateGlobalState("anthropicBaseUrl", anthropicBaseUrl)
		await this.storeSecret("geminiApiKey", geminiApiKey)
		await this.storeSecret("openAiNativeApiKey", openAiNativeApiKey)
@@ -460,6 +466,10 @@ export class ClineProvider implements vscode.WebviewViewProvider {
				const ollamaModels = await this.getOllamaModels(message.text)
				this.postMessageToWebview({ type: "ollamaModels", ollamaModels })
				break
+			case "requestLmStudioModels":
+				const lmStudioModels = await this.getLmStudioModels(message.text)
+				this.postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
+				break
			case "refreshOpenRouterModels":
				await this.refreshOpenRouterModels()
				break
@@ -527,6 +537,25 @@ export class ClineProvider implements vscode.WebviewViewProvider {
		}
	}
+
+	// LM Studio
+	async getLmStudioModels(baseUrl?: string) {
+		try {
+			if (!baseUrl) {
+				baseUrl = "http://localhost:1234"
+			}
+			if (!URL.canParse(baseUrl)) {
+				return []
+			}
+			const response = await axios.get(`${baseUrl}/v1/models`)
+			const modelsArray = response.data?.data?.map((model: any) => model.id) || []
+			const models = [...new Set<string>(modelsArray)]
+			return models
+		} catch (error) {
+			return []
+		}
+	}
+
	// OpenRouter

	async handleOpenRouterCallback(code: string) {
@@ -855,6 +884,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
			openAiModelId,
			ollamaModelId,
			ollamaBaseUrl,
+			lmStudioModelId,
+			lmStudioBaseUrl,
			anthropicBaseUrl,
			geminiApiKey,
			openAiNativeApiKey,
@@ -884,6 +915,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
			this.getGlobalState("openAiModelId") as Promise<string | undefined>,
			this.getGlobalState("ollamaModelId") as Promise<string | undefined>,
			this.getGlobalState("ollamaBaseUrl") as Promise<string | undefined>,
+			this.getGlobalState("lmStudioModelId") as Promise<string | undefined>,
+			this.getGlobalState("lmStudioBaseUrl") as Promise<string | undefined>,
			this.getGlobalState("anthropicBaseUrl") as Promise<string | undefined>,
			this.getSecret("geminiApiKey") as Promise<string | undefined>,
			this.getSecret("openAiNativeApiKey") as Promise<string | undefined>,
@@ -930,6 +963,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
			openAiModelId,
			ollamaModelId,
			ollamaBaseUrl,
+			lmStudioModelId,
+			lmStudioBaseUrl,
			anthropicBaseUrl,
			geminiApiKey,
			openAiNativeApiKey,
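For reference, `getLmStudioModels` above expects LM Studio's OpenAI-style model listing. A standalone sketch of the same request and the response shape it maps over; the model ID in the comment is illustrative:

```typescript
import axios from "axios"

// Sketch of the round trip getLmStudioModels performs; LM Studio's /v1/models returns
// an OpenAI-style list: { data: [{ id: "..." }, ...] }.
async function listLmStudioModels(baseUrl = "http://localhost:1234"): Promise<string[]> {
	try {
		const response = await axios.get(`${baseUrl}/v1/models`)
		// e.g. response.data = { object: "list", data: [{ id: "meta-llama-3.1-8b-instruct" }] }
		const ids: string[] = response.data?.data?.map((model: any) => model.id) || []
		return [...new Set(ids)] // de-duplicate, mirroring the provider code
	} catch {
		return [] // server not running or unreachable
	}
}
```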


@@ -10,6 +10,7 @@ export interface ExtensionMessage {
| "state" | "state"
| "selectedImages" | "selectedImages"
| "ollamaModels" | "ollamaModels"
| "lmStudioModels"
| "theme" | "theme"
| "workspaceUpdated" | "workspaceUpdated"
| "invoke" | "invoke"
@@ -21,6 +22,7 @@ export interface ExtensionMessage {
state?: ExtensionState state?: ExtensionState
images?: string[] images?: string[]
ollamaModels?: string[] ollamaModels?: string[]
lmStudioModels?: string[]
filePaths?: string[] filePaths?: string[]
partialMessage?: ClineMessage partialMessage?: ClineMessage
openRouterModels?: Record<string, ModelInfo> openRouterModels?: Record<string, ModelInfo>


@@ -19,6 +19,7 @@ export interface WebviewMessage {
| "exportTaskWithId" | "exportTaskWithId"
| "resetState" | "resetState"
| "requestOllamaModels" | "requestOllamaModels"
| "requestLmStudioModels"
| "openImage" | "openImage"
| "openFile" | "openFile"
| "openMention" | "openMention"


@@ -5,6 +5,7 @@ export type ApiProvider =
| "vertex" | "vertex"
| "openai" | "openai"
| "ollama" | "ollama"
| "lmstudio"
| "gemini" | "gemini"
| "openai-native" | "openai-native"
@@ -27,6 +28,8 @@ export interface ApiHandlerOptions {
openAiModelId?: string openAiModelId?: string
ollamaModelId?: string ollamaModelId?: string
ollamaBaseUrl?: string ollamaBaseUrl?: string
lmStudioModelId?: string
lmStudioBaseUrl?: string
geminiApiKey?: string geminiApiKey?: string
openAiNativeApiKey?: string openAiNativeApiKey?: string
azureApiVersion?: string azureApiVersion?: string
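Putting the new provider value and option fields together, a configuration targeting LM Studio would look roughly like this (a sketch; the import path and values are illustrative, and both LM Studio fields are optional):

```typescript
import type { ApiConfiguration } from "./api" // path illustrative; the type lives in shared/api.ts

const config: ApiConfiguration = {
	apiProvider: "lmstudio",
	// The handler falls back to http://localhost:1234 when no base URL is set.
	lmStudioBaseUrl: "http://localhost:1234",
	lmStudioModelId: "meta-llama-3.1-8b-instruct",
}
```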


@@ -96,6 +96,7 @@ const TaskHeader: React.FC<TaskHeaderProps> = ({
		return (
			apiConfiguration?.apiProvider !== "openai" &&
			apiConfiguration?.apiProvider !== "ollama" &&
+			apiConfiguration?.apiProvider !== "lmstudio" &&
			apiConfiguration?.apiProvider !== "gemini"
		)
	}, [apiConfiguration?.apiProvider])


@@ -45,6 +45,7 @@ interface ApiOptionsProps {
const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) => {
	const { apiConfiguration, setApiConfiguration, uriScheme } = useExtensionState()
	const [ollamaModels, setOllamaModels] = useState<string[]>([])
+	const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
	const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
	const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
	const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
@@ -57,23 +58,27 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
		return normalizeApiConfiguration(apiConfiguration)
	}, [apiConfiguration])

-	// Poll ollama models
-	const requestOllamaModels = useCallback(() => {
+	// Poll ollama/lmstudio models
+	const requestLocalModels = useCallback(() => {
		if (selectedProvider === "ollama") {
			vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
+		} else if (selectedProvider === "lmstudio") {
+			vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
		}
-	}, [selectedProvider, apiConfiguration?.ollamaBaseUrl])
+	}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])

	useEffect(() => {
-		if (selectedProvider === "ollama") {
-			requestOllamaModels()
+		if (selectedProvider === "ollama" || selectedProvider === "lmstudio") {
+			requestLocalModels()
		}
-	}, [selectedProvider, requestOllamaModels])
-	useInterval(requestOllamaModels, selectedProvider === "ollama" ? 2000 : null)
+	}, [selectedProvider, requestLocalModels])
+	useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null)

	const handleMessage = useCallback((event: MessageEvent) => {
		const message: ExtensionMessage = event.data
		if (message.type === "ollamaModels" && message.ollamaModels) {
			setOllamaModels(message.ollamaModels)
+		} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
+			setLmStudioModels(message.lmStudioModels)
		}
	}, [])
	useEvent("message", handleMessage)
@@ -128,6 +133,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
<VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption> <VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption>
<VSCodeOption value="openai-native">OpenAI</VSCodeOption> <VSCodeOption value="openai-native">OpenAI</VSCodeOption>
<VSCodeOption value="openai">OpenAI Compatible</VSCodeOption> <VSCodeOption value="openai">OpenAI Compatible</VSCodeOption>
<VSCodeOption value="lmstudio">LM Studio</VSCodeOption>
<VSCodeOption value="ollama">Ollama</VSCodeOption> <VSCodeOption value="ollama">Ollama</VSCodeOption>
</VSCodeDropdown> </VSCodeDropdown>
</div> </div>
@@ -463,6 +469,75 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
</div> </div>
)} )}
{selectedProvider === "lmstudio" && (
<div>
<VSCodeTextField
value={apiConfiguration?.lmStudioBaseUrl || ""}
style={{ width: "100%" }}
type="url"
onInput={handleInputChange("lmStudioBaseUrl")}
placeholder={"Default: http://localhost:1234"}>
<span style={{ fontWeight: 500 }}>Base URL (optional)</span>
</VSCodeTextField>
<VSCodeTextField
value={apiConfiguration?.lmStudioModelId || ""}
style={{ width: "100%" }}
onInput={handleInputChange("lmStudioModelId")}
placeholder={"e.g. meta-llama-3.1-8b-instruct"}>
<span style={{ fontWeight: 500 }}>Model ID</span>
</VSCodeTextField>
{lmStudioModels.length > 0 && (
<VSCodeRadioGroup
value={
lmStudioModels.includes(apiConfiguration?.lmStudioModelId || "")
? apiConfiguration?.lmStudioModelId
: ""
}
onChange={(e) => {
const value = (e.target as HTMLInputElement)?.value
// need to check value first since radio group returns empty string sometimes
if (value) {
handleInputChange("lmStudioModelId")({
target: { value },
})
}
}}>
{lmStudioModels.map((model) => (
<VSCodeRadio
key={model}
value={model}
checked={apiConfiguration?.lmStudioModelId === model}>
{model}
</VSCodeRadio>
))}
</VSCodeRadioGroup>
)}
<p
style={{
fontSize: "12px",
marginTop: "5px",
color: "var(--vscode-descriptionForeground)",
}}>
LM Studio allows you to run models locally on your computer. For instructions on how to get
started, see their
<VSCodeLink href="https://lmstudio.ai/docs" style={{ display: "inline", fontSize: "inherit" }}>
quickstart guide.
</VSCodeLink>
You will also need to start LM Studio's{" "}
<VSCodeLink
href="https://lmstudio.ai/docs/basics/server"
style={{ display: "inline", fontSize: "inherit" }}>
local server
</VSCodeLink>{" "}
feature to use it with this extension.{" "}
<span style={{ color: "var(--vscode-errorForeground)" }}>
(<span style={{ fontWeight: 500 }}>Note:</span> Cline uses complex prompts and works best
with Claude models. Less capable models may not work as expected.)
</span>
</p>
</div>
)}
{selectedProvider === "ollama" && ( {selectedProvider === "ollama" && (
<div> <div>
<VSCodeTextField <VSCodeTextField
@@ -543,6 +618,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
{selectedProvider !== "openrouter" && {selectedProvider !== "openrouter" &&
selectedProvider !== "openai" && selectedProvider !== "openai" &&
selectedProvider !== "ollama" && selectedProvider !== "ollama" &&
selectedProvider !== "lmstudio" &&
showModelOptions && ( showModelOptions && (
<> <>
<div className="dropdown-container"> <div className="dropdown-container">
@@ -758,6 +834,12 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
				selectedModelId: apiConfiguration?.ollamaModelId || "",
				selectedModelInfo: openAiModelInfoSaneDefaults,
			}
+		case "lmstudio":
+			return {
+				selectedProvider: provider,
+				selectedModelId: apiConfiguration?.lmStudioModelId || "",
+				selectedModelInfo: openAiModelInfoSaneDefaults,
+			}
		default:
			return getProviderData(anthropicModels, anthropicDefaultModelId)
	}


@@ -56,6 +56,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
				config.vertexProjectId,
				config.openAiApiKey,
				config.ollamaModelId,
+				config.lmStudioModelId,
				config.geminiApiKey,
				config.openAiNativeApiKey,
			].some((key) => key !== undefined)


@@ -47,6 +47,11 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s
return "You must provide a valid model ID." return "You must provide a valid model ID."
} }
break break
case "lmstudio":
if (!apiConfiguration.lmStudioModelId) {
return "You must provide a valid model ID."
}
break
} }
} }
return undefined return undefined
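A quick illustration of how the new `lmstudio` branch behaves, assuming the configuration shape from `shared/api.ts` above; the import path is illustrative:

```typescript
import { validateApiConfiguration } from "./validate" // path illustrative

// Missing model ID -> the error string from the new "lmstudio" case
console.log(validateApiConfiguration({ apiProvider: "lmstudio" }))
// "You must provide a valid model ID."

// With a model ID set, this provider has no other checks, so validation should return undefined
console.log(validateApiConfiguration({ apiProvider: "lmstudio", lmStudioModelId: "meta-llama-3.1-8b-instruct" }))
```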