Mirror of https://github.com/pacnpal/Roo-Code.git (synced 2025-12-21 04:41:16 -05:00)
Fix o1 in openai native
@@ -23,48 +23,52 @@ export class OpenAiNativeHandler implements ApiHandler {
 	}
 
 	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-		let systemPromptMessage: OpenAI.Chat.ChatCompletionMessageParam
-		let temperature = 0
 		switch (this.getModel().id) {
 			case "o1-preview":
-			case "o1-mini":
-				systemPromptMessage = { role: "user", content: systemPrompt }
-				temperature = 1
-				break
-			default:
-				systemPromptMessage = { role: "system", content: systemPrompt }
-				temperature = 0
-		}
-
-		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
-			systemPromptMessage,
-			...convertToOpenAiMessages(messages),
-		]
-
-		const stream = await this.client.chat.completions.create({
-			model: this.getModel().id,
-			// max_completion_tokens: this.getModel().info.maxTokens,
-			temperature,
-			messages: openAiMessages,
-			stream: true,
-			stream_options: { include_usage: true },
-		})
-
-		for await (const chunk of stream) {
-			const delta = chunk.choices[0]?.delta
-			if (delta?.content) {
-				yield {
-					type: "text",
-					text: delta.content,
-				}
-			}
-
-			// contains a null value except for the last chunk which contains the token usage statistics for the entire request
-			if (chunk.usage) {
-				yield {
-					type: "usage",
-					inputTokens: chunk.usage.prompt_tokens || 0,
-					outputTokens: chunk.usage.completion_tokens || 0,
-				}
-			}
-		}
+			case "o1-mini": {
+				// o1 doesn't support streaming, non-1 temp, or system prompt
+				const response = await this.client.chat.completions.create({
+					model: this.getModel().id,
+					messages: [{ role: "user", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+				})
+				yield {
+					type: "text",
+					text: response.choices[0]?.message.content || "",
+				}
+				yield {
+					type: "usage",
+					inputTokens: response.usage?.prompt_tokens || 0,
+					outputTokens: response.usage?.completion_tokens || 0,
+				}
+				break
+			}
+			default: {
+				const stream = await this.client.chat.completions.create({
+					model: this.getModel().id,
+					// max_completion_tokens: this.getModel().info.maxTokens,
+					temperature: 0,
+					messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+					stream: true,
+					stream_options: { include_usage: true },
+				})
+
+				for await (const chunk of stream) {
+					const delta = chunk.choices[0]?.delta
+					if (delta?.content) {
+						yield {
+							type: "text",
+							text: delta.content,
+						}
+					}
+
+					// contains a null value except for the last chunk which contains the token usage statistics for the entire request
+					if (chunk.usage) {
+						yield {
+							type: "usage",
+							inputTokens: chunk.usage.prompt_tokens || 0,
+							outputTokens: chunk.usage.completion_tokens || 0,
+						}
+					}
+				}
+			}
+		}
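For reference, the change reduces to the following standalone pattern: a minimal sketch, assuming the `openai` npm package (v4). The free-standing function, its parameters, and the ApiChunk union are illustrative stand-ins for the handler's ApiStream plumbing, not definitions taken from the repository.

// Minimal sketch, assuming the `openai` npm package (v4). The standalone
// function and the ApiChunk union are hypothetical stand-ins for the
// handler's ApiStream types.
import OpenAI from "openai"

type ApiChunk =
	| { type: "text"; text: string }
	| { type: "usage"; inputTokens: number; outputTokens: number }

async function* createMessage(
	client: OpenAI,
	model: string,
	systemPrompt: string,
	userMessages: OpenAI.Chat.ChatCompletionMessageParam[],
): AsyncGenerator<ApiChunk> {
	if (model === "o1-preview" || model === "o1-mini") {
		// o1 rejects streaming, non-default temperature, and the "system"
		// role, so the system prompt is sent as a user message in a single
		// blocking request (temperature is omitted, leaving the default of 1).
		const response = await client.chat.completions.create({
			model,
			messages: [{ role: "user", content: systemPrompt }, ...userMessages],
		})
		// The complete answer goes out as one text chunk, then the usage
		// totals, so callers see the same chunk shapes as when streaming.
		yield { type: "text", text: response.choices[0]?.message.content || "" }
		yield {
			type: "usage",
			inputTokens: response.usage?.prompt_tokens || 0,
			outputTokens: response.usage?.completion_tokens || 0,
		}
		return
	}

	// Every other model keeps the original streaming path.
	const stream = await client.chat.completions.create({
		model,
		temperature: 0,
		messages: [{ role: "system", content: systemPrompt }, ...userMessages],
		stream: true,
		stream_options: { include_usage: true },
	})
	for await (const chunk of stream) {
		const delta = chunk.choices[0]?.delta
		if (delta?.content) {
			yield { type: "text", text: delta.content }
		}
		// chunk.usage is null on every chunk except the final one
		if (chunk.usage) {
			yield {
				type: "usage",
				inputTokens: chunk.usage.prompt_tokens || 0,
				outputTokens: chunk.usage.completion_tokens || 0,
			}
		}
	}
}

Either branch is consumed identically, which is the point of the fix: a caller's for await loop over createMessage sees the same chunk shapes whether or not the model supports streaming.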