Add OpenRouter custom model scheme
@@ -25,7 +25,7 @@ export class GeminiHandler implements ApiHandler {
 		const result = await model.generateContentStream({
 			contents: messages.map(convertAnthropicMessageToGemini),
 			generationConfig: {
-				maxOutputTokens: this.getModel().info.maxTokens,
+				// maxOutputTokens: this.getModel().info.maxTokens,
 				temperature: 0,
 			},
 		})
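With `maxOutputTokens` commented out, the output cap is deferred to the model's own default. A minimal sketch of the resulting pattern, assuming the `@google/generative-ai` package (the model name and prompt below are illustrative, not from this diff):

```typescript
import { GoogleGenerativeAI, GenerationConfig } from "@google/generative-ai"

// Every field of GenerationConfig is optional, so omitting maxOutputTokens
// is valid and lets the model apply its own output-token ceiling.
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? "")
const model = genAI.getGenerativeModel({ model: "gemini-1.5-pro" })

async function demo(): Promise<void> {
	const generationConfig: GenerationConfig = { temperature: 0 } // no maxOutputTokens
	const result = await model.generateContentStream({
		contents: [{ role: "user", parts: [{ text: "Hello" }] }],
		generationConfig,
	})
	for await (const chunk of result.stream) {
		process.stdout.write(chunk.text())
	}
}
```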
@@ -24,7 +24,7 @@ export class OllamaHandler implements ApiHandler {
 		]
 
 		const stream = await this.client.chat.completions.create({
-			model: this.options.ollamaModelId ?? "",
+			model: this.getModel().id,
 			messages: openAiMessages,
 			temperature: 0,
 			stream: true,
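The Ollama change routes the model id through `getModel()` instead of reading the option directly, matching the other handlers. A hypothetical `getModel()` for this handler that preserves the old empty-string fallback might look like the following; `openAiModelInfoSaneDefaults` is an assumed shared constant, not confirmed by this diff:

```typescript
// Hypothetical sketch for OllamaHandler; the default ModelInfo constant
// is assumed and may be named differently in shared/api.
getModel(): { id: string; info: ModelInfo } {
	return {
		id: this.options.ollamaModelId ?? "",
		info: openAiModelInfoSaneDefaults,
	}
}
```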
@@ -30,7 +30,7 @@ export class OpenAiNativeHandler implements ApiHandler {
 
 		const stream = await this.client.chat.completions.create({
 			model: this.getModel().id,
-			max_completion_tokens: this.getModel().info.maxTokens,
+			// max_completion_tokens: this.getModel().info.maxTokens,
 			temperature: 0,
 			messages: openAiMessages,
 			stream: true,
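Omitting `max_completion_tokens` likewise leaves the output cap to the API's per-model default. A self-contained sketch of the same streaming call with the `openai` npm package (the model id and prompt are illustrative):

```typescript
import OpenAI from "openai"

const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

async function demo(): Promise<void> {
	// No max_completion_tokens: the API applies its own per-model ceiling.
	const stream = await client.chat.completions.create({
		model: "gpt-4o",
		temperature: 0,
		messages: [{ role: "user", content: "Hello" }],
		stream: true,
	})
	for await (const chunk of stream) {
		process.stdout.write(chunk.choices[0]?.delta?.content ?? "")
	}
}
```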
@@ -2,13 +2,7 @@ import { Anthropic } from "@anthropic-ai/sdk"
 import axios from "axios"
 import OpenAI from "openai"
 import { ApiHandler } from "../"
-import {
-	ApiHandlerOptions,
-	ModelInfo,
-	openRouterDefaultModelId,
-	OpenRouterModelId,
-	openRouterModels,
-} from "../../shared/api"
+import { ApiHandlerOptions, ModelInfo, openRouterDefaultModelId, openRouterDefaultModelInfo } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"
 
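This import swap is the heart of the custom model scheme: the hard-coded `openRouterModels` table and its `OpenRouterModelId` union are gone, replaced by a single default id plus a default `ModelInfo`. One plausible shape for the new constant, inferred from how the handlers use `ModelInfo`; the concrete values here are guesses, not taken from the repo:

```typescript
// Illustrative only; the real definition lives in shared/api.
export const openRouterDefaultModelId = "anthropic/claude-3.5-sonnet:beta"

export const openRouterDefaultModelInfo: ModelInfo = {
	maxTokens: 8_192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsPromptCache: false,
	inputPrice: 3.0, // assumed: USD per million input tokens
	outputPrice: 15.0, // assumed: USD per million output tokens
}
```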
@@ -74,9 +68,18 @@ export class OpenRouterHandler implements ApiHandler {
 				break
 		}
 
+		// It's unclear how OpenRouter defaults max tokens when no value is provided, but the Anthropic API requires this value, and since OpenRouter offers both 4096 and 8192 variants, we should ensure 8192.
+		// (Models usually default to the maximum tokens allowed.)
+		let maxTokens: number | undefined
+		switch (this.getModel().id) {
+			case "anthropic/claude-3.5-sonnet":
+			case "anthropic/claude-3.5-sonnet:beta":
+				maxTokens = 8_192
+				break
+		}
 		const stream = await this.client.chat.completions.create({
 			model: this.getModel().id,
-			max_tokens: this.getModel().info.maxTokens,
+			max_tokens: maxTokens,
 			temperature: 0,
 			messages: openAiMessages,
 			stream: true,
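With only two model ids needing a forced value today, the `switch` reads fine; if the list grows, the same special-casing works as a lookup table. A purely illustrative alternative sketch:

```typescript
// Same special-casing as a table, easier to extend for more model ids.
const FORCED_MAX_TOKENS: Record<string, number> = {
	"anthropic/claude-3.5-sonnet": 8_192,
	"anthropic/claude-3.5-sonnet:beta": 8_192,
}

function forcedMaxTokens(modelId: string): number | undefined {
	return FORCED_MAX_TOKENS[modelId]
}
```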
@@ -129,12 +132,12 @@ export class OpenRouterHandler implements ApiHandler {
 		}
 	}
 
-	getModel(): { id: OpenRouterModelId; info: ModelInfo } {
-		const modelId = this.options.apiModelId
-		if (modelId && modelId in openRouterModels) {
-			const id = modelId as OpenRouterModelId
-			return { id, info: openRouterModels[id] }
+	getModel(): { id: string; info: ModelInfo } {
+		const modelId = this.options.openRouterModelId
+		const modelInfo = this.options.openRouterModelInfo
+		if (modelId && modelInfo) {
+			return { id: modelId, info: modelInfo }
 		}
-		return { id: openRouterDefaultModelId, info: openRouterModels[openRouterDefaultModelId] }
+		return { id: openRouterDefaultModelId, info: openRouterDefaultModelInfo }
 	}
 }
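Since `getModel()` now returns whatever id/info pair the options carry, any OpenRouter model can be used, provided something upstream supplies a matching `ModelInfo`. The `axios` import suggests that data comes from OpenRouter's public model list; a sketch of such a fetch, where the endpoint is real but the field mapping onto `ModelInfo` is an educated guess:

```typescript
import axios from "axios"

// Sketch: look up one model's metadata from OpenRouter's public model list.
async function fetchOpenRouterModelInfo(modelId: string): Promise<ModelInfo | undefined> {
	const response = await axios.get("https://openrouter.ai/api/v1/models")
	const raw = response.data?.data?.find((m: any) => m.id === modelId)
	if (!raw) {
		return undefined
	}
	return {
		maxTokens: raw.top_provider?.max_completion_tokens,
		contextWindow: raw.context_length,
		supportsImages: raw.architecture?.modality?.includes("image") ?? false,
		supportsPromptCache: false,
		inputPrice: parseFloat(raw.pricing?.prompt) * 1_000_000, // per-token -> per-million
		outputPrice: parseFloat(raw.pricing?.completion) * 1_000_000,
	}
}
```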