Mirror of https://github.com/pacnpal/Roo-Code.git

Commit: Merge remote-tracking branch 'upstream/main'
@@ -96,6 +96,7 @@ const TaskHeader: React.FC<TaskHeaderProps> = ({
 		return (
 			apiConfiguration?.apiProvider !== "openai" &&
 			apiConfiguration?.apiProvider !== "ollama" &&
+			apiConfiguration?.apiProvider !== "lmstudio" &&
 			apiConfiguration?.apiProvider !== "gemini"
 		)
 	}, [apiConfiguration?.apiProvider])

@@ -45,6 +45,7 @@ interface ApiOptionsProps {
 const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }: ApiOptionsProps) => {
 	const { apiConfiguration, setApiConfiguration, uriScheme } = useExtensionState()
 	const [ollamaModels, setOllamaModels] = useState<string[]>([])
+	const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
 	const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
 	const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
 	const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false)
@@ -57,23 +58,27 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 		return normalizeApiConfiguration(apiConfiguration)
 	}, [apiConfiguration])

-	// Poll ollama models
-	const requestOllamaModels = useCallback(() => {
+	// Poll ollama/lmstudio models
+	const requestLocalModels = useCallback(() => {
 		if (selectedProvider === "ollama") {
 			vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl })
+		} else if (selectedProvider === "lmstudio") {
+			vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
 		}
-	}, [selectedProvider, apiConfiguration?.ollamaBaseUrl])
+	}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
 	useEffect(() => {
-		if (selectedProvider === "ollama") {
-			requestOllamaModels()
+		if (selectedProvider === "ollama" || selectedProvider === "lmstudio") {
+			requestLocalModels()
 		}
-	}, [selectedProvider, requestOllamaModels])
-	useInterval(requestOllamaModels, selectedProvider === "ollama" ? 2000 : null)
+	}, [selectedProvider, requestLocalModels])
+	useInterval(requestLocalModels, selectedProvider === "ollama" || selectedProvider === "lmstudio" ? 2000 : null)

 	const handleMessage = useCallback((event: MessageEvent) => {
 		const message: ExtensionMessage = event.data
 		if (message.type === "ollamaModels" && message.ollamaModels) {
 			setOllamaModels(message.ollamaModels)
+		} else if (message.type === "lmStudioModels" && message.lmStudioModels) {
+			setLmStudioModels(message.lmStudioModels)
 		}
 	}, [])
 	useEvent("message", handleMessage)
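The extension-side counterpart of this exchange is not part of this commit. Below is a minimal sketch of what the webview code above assumes: the message types ("requestLmStudioModels" in, "lmStudioModels" out) come from the diff, while the function name, the use of axios, the LM Studio /v1/models endpoint, and the postMessageToWebview call are assumptions, not the repository's actual implementation.

// Hypothetical extension-side handler sketch (not from this diff).
import axios from "axios"

async function getLmStudioModels(baseUrl: string = "http://localhost:1234"): Promise<string[]> {
	try {
		// LM Studio's local server exposes an OpenAI-compatible API, so the model list
		// is assumed to be available at GET {baseUrl}/v1/models.
		const response = await axios.get(`${baseUrl}/v1/models`)
		const modelIds: string[] = response.data?.data?.map((model: any) => model.id) ?? []
		// De-duplicate and sort so the radio group in the webview renders a stable list.
		return [...new Set(modelIds)].sort()
	} catch {
		// If the server isn't running, return an empty list; the webview then shows no model radios.
		return []
	}
}

// Assumed placement inside the extension's webview message switch:
// case "requestLmStudioModels": {
//   const lmStudioModels = await getLmStudioModels(message.text)
//   await postMessageToWebview({ type: "lmStudioModels", lmStudioModels })
//   break
// }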
@@ -128,6 +133,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 						<VSCodeOption value="bedrock">AWS Bedrock</VSCodeOption>
 						<VSCodeOption value="openai-native">OpenAI</VSCodeOption>
 						<VSCodeOption value="openai">OpenAI Compatible</VSCodeOption>
+						<VSCodeOption value="lmstudio">LM Studio</VSCodeOption>
 						<VSCodeOption value="ollama">Ollama</VSCodeOption>
 					</VSCodeDropdown>
 				</div>
@@ -463,6 +469,75 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 				</div>
 			)}

+			{selectedProvider === "lmstudio" && (
+				<div>
+					<VSCodeTextField
+						value={apiConfiguration?.lmStudioBaseUrl || ""}
+						style={{ width: "100%" }}
+						type="url"
+						onInput={handleInputChange("lmStudioBaseUrl")}
+						placeholder={"Default: http://localhost:1234"}>
+						<span style={{ fontWeight: 500 }}>Base URL (optional)</span>
+					</VSCodeTextField>
+					<VSCodeTextField
+						value={apiConfiguration?.lmStudioModelId || ""}
+						style={{ width: "100%" }}
+						onInput={handleInputChange("lmStudioModelId")}
+						placeholder={"e.g. meta-llama-3.1-8b-instruct"}>
+						<span style={{ fontWeight: 500 }}>Model ID</span>
+					</VSCodeTextField>
+					{lmStudioModels.length > 0 && (
+						<VSCodeRadioGroup
+							value={
+								lmStudioModels.includes(apiConfiguration?.lmStudioModelId || "")
+									? apiConfiguration?.lmStudioModelId
+									: ""
+							}
+							onChange={(e) => {
+								const value = (e.target as HTMLInputElement)?.value
+								// need to check value first since radio group returns empty string sometimes
+								if (value) {
+									handleInputChange("lmStudioModelId")({
+										target: { value },
+									})
+								}
+							}}>
+							{lmStudioModels.map((model) => (
+								<VSCodeRadio
+									key={model}
+									value={model}
+									checked={apiConfiguration?.lmStudioModelId === model}>
+									{model}
+								</VSCodeRadio>
+							))}
+						</VSCodeRadioGroup>
+					)}
+					<p
+						style={{
+							fontSize: "12px",
+							marginTop: "5px",
+							color: "var(--vscode-descriptionForeground)",
+						}}>
+						LM Studio allows you to run models locally on your computer. For instructions on how to get
+						started, see their
+						<VSCodeLink href="https://lmstudio.ai/docs" style={{ display: "inline", fontSize: "inherit" }}>
+							quickstart guide.
+						</VSCodeLink>
+						You will also need to start LM Studio's{" "}
+						<VSCodeLink
+							href="https://lmstudio.ai/docs/basics/server"
+							style={{ display: "inline", fontSize: "inherit" }}>
+							local server
+						</VSCodeLink>{" "}
+						feature to use it with this extension.{" "}
+						<span style={{ color: "var(--vscode-errorForeground)" }}>
+							(<span style={{ fontWeight: 500 }}>Note:</span> Cline uses complex prompts and works best
+							with Claude models. Less capable models may not work as expected.)
+						</span>
+					</p>
+				</div>
+			)}
+
 			{selectedProvider === "ollama" && (
 				<div>
 					<VSCodeTextField
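handleInputChange is used throughout the block above but is not defined in this hunk. A plausible sketch of the curried setter it appears to be, using only names that occur elsewhere in this diff (the implementation itself is an assumption, not the repository's code):

// Hypothetical: returns an event handler that writes a single field back into apiConfiguration.
const handleInputChange = (field: keyof ApiConfiguration) => (event: any) => {
	setApiConfiguration({ ...apiConfiguration, [field]: event.target.value })
}

// Under that assumption, the radio group's onChange can hand it a synthetic event object:
// handleInputChange("lmStudioModelId")({ target: { value: "meta-llama-3.1-8b-instruct" } })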
@@ -543,6 +618,7 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 			{selectedProvider !== "openrouter" &&
 				selectedProvider !== "openai" &&
 				selectedProvider !== "ollama" &&
+				selectedProvider !== "lmstudio" &&
 				showModelOptions && (
 					<>
 						<div className="dropdown-container">
@@ -758,6 +834,12 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 				selectedModelId: apiConfiguration?.ollamaModelId || "",
 				selectedModelInfo: openAiModelInfoSaneDefaults,
 			}
+		case "lmstudio":
+			return {
+				selectedProvider: provider,
+				selectedModelId: apiConfiguration?.lmStudioModelId || "",
+				selectedModelInfo: openAiModelInfoSaneDefaults,
+			}
 		default:
 			return getProviderData(anthropicModels, anthropicDefaultModelId)
 	}

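For reference, the new "lmstudio" branch above mirrors the "ollama" one: the selected model ID is whatever the user typed into lmStudioModelId, and the model metadata falls back to openAiModelInfoSaneDefaults. A minimal illustration, assuming the imports and ApiConfiguration fields shown elsewhere in this diff:

// Assumed usage of the new case; all field names are taken from the diff above.
const { selectedProvider, selectedModelId, selectedModelInfo } = normalizeApiConfiguration({
	apiProvider: "lmstudio",
	lmStudioModelId: "meta-llama-3.1-8b-instruct",
	lmStudioBaseUrl: "http://localhost:1234",
})
// selectedProvider === "lmstudio"
// selectedModelId === "meta-llama-3.1-8b-instruct"
// selectedModelInfo === openAiModelInfoSaneDefaults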
@@ -56,6 +56,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
 				config.vertexProjectId,
 				config.openAiApiKey,
 				config.ollamaModelId,
+				config.lmStudioModelId,
 				config.geminiApiKey,
 				config.openAiNativeApiKey,
 			].some((key) => key !== undefined)

@@ -47,6 +47,11 @@ export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): s
 					return "You must provide a valid model ID."
 				}
 				break
+			case "lmstudio":
+				if (!apiConfiguration.lmStudioModelId) {
+					return "You must provide a valid model ID."
+				}
+				break
 		}
 	}
 	return undefined
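A quick illustration of the new validation branch, assuming only the ApiConfiguration fields that appear in this diff: with no model ID the new case returns the error string, otherwise validation passes.

// Hedged example of the new "lmstudio" case in validateApiConfiguration.
validateApiConfiguration({ apiProvider: "lmstudio" })
// => "You must provide a valid model ID."
validateApiConfiguration({ apiProvider: "lmstudio", lmStudioModelId: "meta-llama-3.1-8b-instruct" })
// => undefined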