import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
import { ApiHandler, SingleCompletionHandler } from "../"
import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
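
/**
 * API handler for LM Studio's local OpenAI-compatible server. The standard
 * `openai` client is reused, pointed at the user-configured base URL (or the
 * LM Studio default, http://localhost:1234).
 */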
export class LmStudioHandler implements ApiHandler, SingleCompletionHandler {
	private options: ApiHandlerOptions
	private client: OpenAI

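	// LM Studio does not require an API key, so a placeholder is passed to
	// satisfy the OpenAI client; the server's OpenAI-compatible endpoint lives
	// under /v1 of the configured (or default) base URL.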
	constructor(options: ApiHandlerOptions) {
		this.options = options
		this.client = new OpenAI({
			baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
			apiKey: "noop",
		})
	}

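	// Streams a chat completion from LM Studio, converting the Anthropic-style
	// message history into OpenAI format and yielding text deltas as they arrive.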
	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
			{ role: "system", content: systemPrompt },
			...convertToOpenAiMessages(messages),
		]

		try {
			const stream = await this.client.chat.completions.create({
				model: this.getModel().id,
				messages: openAiMessages,
				temperature: 0,
				stream: true,
			})
			for await (const chunk of stream) {
				const delta = chunk.choices[0]?.delta
				if (delta?.content) {
					yield {
						type: "text",
						text: delta.content,
					}
				}
			}
		} catch (error) {
			// LM Studio doesn't return an error code/body for now
			throw new Error(
				"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Cline's prompts.",
			)
		}
	}

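	// Returns the configured LM Studio model ID (empty string when unset),
	// paired with generic OpenAI model defaults for capability info.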
	getModel(): { id: string; info: ModelInfo } {
		return {
			id: this.options.lmStudioModelId || "",
			info: openAiModelInfoSaneDefaults,
		}
	}

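	// Sends a single non-streaming completion request and returns the full
	// response text (or an empty string if no content came back).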
	async completePrompt(prompt: string): Promise<string> {
		try {
			const response = await this.client.chat.completions.create({
				model: this.getModel().id,
				messages: [{ role: "user", content: prompt }],
				temperature: 0,
				stream: false,
			})
			return response.choices[0]?.message.content || ""
		} catch (error) {
			throw new Error(
				"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Cline's prompts.",
			)
		}
	}
}
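
// Example usage (a minimal sketch: assumes a running LM Studio server with a
// model loaded, and "llama-3.2-3b-instruct" is a hypothetical model ID):
//
// const handler = new LmStudioHandler({ lmStudioModelId: "llama-3.2-3b-instruct" })
// for await (const chunk of handler.createMessage("You are a helpful assistant.", [
// 	{ role: "user", content: "Hello!" },
// ])) {
// 	if (chunk.type === "text") process.stdout.write(chunk.text)
// }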