Mirror of https://github.com/pacnpal/Roo-Code.git (synced 2025-12-20 12:21:13 -05:00)
Set temperature to 0.2
@@ -37,6 +37,7 @@ export class AnthropicHandler implements ApiHandler {
 {
     model: modelId,
     max_tokens: this.getModel().info.maxTokens,
+    temperature: 0.2,
     system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }], // setting cache breakpoint for system prompt so new tasks can reuse it
     messages: messages.map((message, index) => {
         if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
@@ -89,6 +90,7 @@ export class AnthropicHandler implements ApiHandler {
 const message = await this.client.messages.create({
     model: modelId,
     max_tokens: this.getModel().info.maxTokens,
+    temperature: 0.2,
     system: [{ text: systemPrompt, type: "text" }],
     messages,
     tools,
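For context, a minimal standalone sketch of the call shape these two hunks modify, using the @anthropic-ai/sdk package. The model id, prompts, and key handling are illustrative assumptions, not taken from the commit; the AwsBedrockHandler and VertexHandler hunks below pass the same fields through Anthropic's Bedrock and Vertex client variants.

import Anthropic from "@anthropic-ai/sdk"

// Assumes an ESM module (top-level await) and ANTHROPIC_API_KEY in the environment.
const client = new Anthropic()

const message = await client.messages.create({
    model: "claude-3-5-sonnet-20241022", // hypothetical model id for illustration
    max_tokens: 1024,
    // The value this commit pins: temperatures near 0 trade variety for
    // repeatable, deterministic-leaning completions.
    temperature: 0.2,
    system: "You are a concise coding assistant.",
    messages: [{ role: "user", content: "Explain sampling temperature in one sentence." }],
})

console.log(message.content)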
@@ -31,6 +31,7 @@ export class AwsBedrockHandler implements ApiHandler {
 const message = await this.client.messages.create({
     model: this.getModel().id,
     max_tokens: this.getModel().info.maxTokens,
+    temperature: 0.2,
     system: systemPrompt,
     messages,
     tools,
@@ -39,6 +39,7 @@ export class GeminiHandler implements ApiHandler {
     contents: messages.map(convertAnthropicMessageToGemini),
     generationConfig: {
         maxOutputTokens: this.getModel().info.maxTokens,
+        temperature: 0.2,
     },
 })
 const message = convertGeminiResponseToAnthropic(result.response)
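The Gemini hunk sets the same value, but nested under generationConfig rather than as a top-level field. A minimal sketch, assuming the @google/generative-ai package and a hypothetical model id and prompt:

import { GoogleGenerativeAI } from "@google/generative-ai"

const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? "")
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }) // hypothetical model id

const result = await model.generateContent({
    contents: [{ role: "user", parts: [{ text: "Summarize this diff in one sentence." }] }],
    generationConfig: {
        maxOutputTokens: 2048,
        temperature: 0.2, // same knob as above, placed in generationConfig per the Gemini API
    },
})

console.log(result.response.text())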
@@ -36,6 +36,7 @@ export class OllamaHandler implements ApiHandler {
 const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
     model: this.options.ollamaModelId ?? "",
     messages: openAiMessages,
+    temperature: 0.2,
     tools: openAiTools,
     tool_choice: "auto",
 }
@@ -48,6 +48,7 @@ export class OpenAiNativeHandler implements ApiHandler {
 createParams = {
     model: this.getModel().id,
     max_completion_tokens: this.getModel().info.maxTokens,
+    temperature: 0.2,
     messages: convertToO1Messages(convertToOpenAiMessages(messages), systemPrompt),
 }
 break
@@ -55,6 +56,7 @@ export class OpenAiNativeHandler implements ApiHandler {
 createParams = {
     model: this.getModel().id,
     max_completion_tokens: this.getModel().info.maxTokens,
+    temperature: 0.2,
     messages: openAiMessages,
     tools: openAiTools,
     tool_choice: "auto",
@@ -47,6 +47,7 @@ export class OpenAiHandler implements ApiHandler {
 const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
     model: this.options.openAiModelId ?? "",
     messages: openAiMessages,
+    temperature: 0.2,
     tools: openAiTools,
     tool_choice: "auto",
 }
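OllamaHandler, OpenAiNativeHandler, OpenAiHandler, and OpenRouterHandler all build OpenAI-style chat-completion params, so the added field is identical across those hunks. A minimal sketch with the openai npm package; the model id and prompts are illustrative assumptions:

import OpenAI from "openai"

// Assumes OPENAI_API_KEY in the environment; OpenRouter and Ollama reuse this
// request shape by pointing the client's baseURL at their own endpoints.
const client = new OpenAI()

const completion = await client.chat.completions.create({
    model: "gpt-4o", // hypothetical model id for illustration
    temperature: 0.2, // the value the commit standardizes on
    messages: [
        { role: "system", content: "You are a concise coding assistant." },
        { role: "user", content: "What does a temperature of 0.2 change?" },
    ],
})

console.log(completion.choices[0].message.content)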
@@ -95,6 +95,7 @@ export class OpenRouterHandler implements ApiHandler {
 createParams = {
     model: this.getModel().id,
     max_tokens: this.getModel().info.maxTokens,
+    temperature: 0.2,
     messages: convertToO1Messages(convertToOpenAiMessages(messages), systemPrompt),
 }
 break
@@ -102,6 +103,7 @@ export class OpenRouterHandler implements ApiHandler {
 createParams = {
     model: this.getModel().id,
     max_tokens: this.getModel().info.maxTokens,
+    temperature: 0.2,
     messages: openAiMessages,
     tools: openAiTools,
     tool_choice: "auto",
@@ -25,6 +25,7 @@ export class VertexHandler implements ApiHandler {
 const message = await this.client.messages.create({
     model: this.getModel().id,
     max_tokens: this.getModel().info.maxTokens,
+    temperature: 0.2,
     system: systemPrompt,
     messages,
     tools,