Set temperature to 0.2

Saoud Rizwan
2024-09-20 10:27:58 -04:00
parent f001aad306
commit 4d1db22fe3
8 changed files with 11 additions and 0 deletions
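
Every hunk below makes the same one-line change: a fixed `temperature: 0.2` is added to the request parameters each provider handler sends. As a minimal sketch of the pattern (a hypothetical standalone handler using the `@anthropic-ai/sdk` package; the class name, model ID, and token limit are illustrative and not taken from this commit):

```typescript
import Anthropic from "@anthropic-ai/sdk"

// Hypothetical minimal handler illustrating the change: every request
// pins temperature to 0.2, trading sampling diversity for more
// deterministic, repeatable completions.
class MinimalAnthropicHandler {
	private client: Anthropic

	constructor(apiKey: string) {
		this.client = new Anthropic({ apiKey })
	}

	async createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]) {
		return this.client.messages.create({
			model: "claude-3-5-sonnet-20240620", // illustrative model ID
			max_tokens: 4096, // illustrative limit
			temperature: 0.2, // the value this commit hard-codes in every handler
			system: systemPrompt,
			messages,
		})
	}
}
```

Note that valid temperature ranges differ by provider (Anthropic accepts 0–1, OpenAI 0–2), so a low value like 0.2 is accepted by every API touched here.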


@@ -37,6 +37,7 @@ export class AnthropicHandler implements ApiHandler {
 {
 	model: modelId,
 	max_tokens: this.getModel().info.maxTokens,
+	temperature: 0.2,
 	system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }], // setting cache breakpoint for system prompt so new tasks can reuse it
 	messages: messages.map((message, index) => {
 		if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
@@ -89,6 +90,7 @@ export class AnthropicHandler implements ApiHandler {
 const message = await this.client.messages.create({
 	model: modelId,
 	max_tokens: this.getModel().info.maxTokens,
+	temperature: 0.2,
 	system: [{ text: systemPrompt, type: "text" }],
 	messages,
 	tools,


@@ -31,6 +31,7 @@ export class AwsBedrockHandler implements ApiHandler {
 const message = await this.client.messages.create({
 	model: this.getModel().id,
 	max_tokens: this.getModel().info.maxTokens,
+	temperature: 0.2,
 	system: systemPrompt,
 	messages,
 	tools,


@@ -39,6 +39,7 @@ export class GeminiHandler implements ApiHandler {
 	contents: messages.map(convertAnthropicMessageToGemini),
 	generationConfig: {
 		maxOutputTokens: this.getModel().info.maxTokens,
+		temperature: 0.2,
 	},
 })
 const message = convertGeminiResponseToAnthropic(result.response)


@@ -36,6 +36,7 @@ export class OllamaHandler implements ApiHandler {
 const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 	model: this.options.ollamaModelId ?? "",
 	messages: openAiMessages,
+	temperature: 0.2,
 	tools: openAiTools,
 	tool_choice: "auto",
 }


@@ -48,6 +48,7 @@ export class OpenAiNativeHandler implements ApiHandler {
 createParams = {
 	model: this.getModel().id,
 	max_completion_tokens: this.getModel().info.maxTokens,
+	temperature: 0.2,
 	messages: convertToO1Messages(convertToOpenAiMessages(messages), systemPrompt),
 }
 break
@@ -55,6 +56,7 @@ export class OpenAiNativeHandler implements ApiHandler {
 createParams = {
 	model: this.getModel().id,
 	max_completion_tokens: this.getModel().info.maxTokens,
+	temperature: 0.2,
 	messages: openAiMessages,
 	tools: openAiTools,
 	tool_choice: "auto",


@@ -47,6 +47,7 @@ export class OpenAiHandler implements ApiHandler {
 const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 	model: this.options.openAiModelId ?? "",
 	messages: openAiMessages,
+	temperature: 0.2,
 	tools: openAiTools,
 	tool_choice: "auto",
 }


@@ -95,6 +95,7 @@ export class OpenRouterHandler implements ApiHandler {
 createParams = {
 	model: this.getModel().id,
 	max_tokens: this.getModel().info.maxTokens,
+	temperature: 0.2,
 	messages: convertToO1Messages(convertToOpenAiMessages(messages), systemPrompt),
 }
 break
@@ -102,6 +103,7 @@ export class OpenRouterHandler implements ApiHandler {
 createParams = {
 	model: this.getModel().id,
 	max_tokens: this.getModel().info.maxTokens,
+	temperature: 0.2,
 	messages: openAiMessages,
 	tools: openAiTools,
 	tool_choice: "auto",


@@ -25,6 +25,7 @@ export class VertexHandler implements ApiHandler {
 const message = await this.client.messages.create({
 	model: this.getModel().id,
 	max_tokens: this.getModel().info.maxTokens,
+	temperature: 0.2,
 	system: systemPrompt,
 	messages,
 	tools,