Add markdown rendering for model description; fix missing max_tokens by falling back to 8192

This commit is contained in:
Saoud Rizwan
2024-10-03 21:23:49 -04:00
parent 9ffe01888a
commit f6a14fdfb9
9 changed files with 159 additions and 117 deletions

View File

@@ -41,7 +41,7 @@ export class AnthropicHandler implements ApiHandler {
stream = await this.client.beta.promptCaching.messages.create(
{
model: modelId,
max_tokens: this.getModel().info.maxTokens,
max_tokens: this.getModel().info.maxTokens || 8192,
temperature: 0,
system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }], // setting cache breakpoint for system prompt so new tasks can reuse it
messages: messages.map((message, index) => {
@@ -96,7 +96,7 @@ export class AnthropicHandler implements ApiHandler {
default: {
stream = (await this.client.messages.create({
model: modelId,
max_tokens: this.getModel().info.maxTokens,
max_tokens: this.getModel().info.maxTokens || 8192,
temperature: 0,
system: [{ text: systemPrompt, type: "text" }],
messages,

View File

@@ -27,7 +27,7 @@ export class AwsBedrockHandler implements ApiHandler {
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
const stream = await this.client.messages.create({
model: this.getModel().id,
max_tokens: this.getModel().info.maxTokens,
max_tokens: this.getModel().info.maxTokens || 8192,
temperature: 0,
system: systemPrompt,
messages,

View File

@@ -21,7 +21,7 @@ export class VertexHandler implements ApiHandler {
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
const stream = await this.client.messages.create({
model: this.getModel().id,
max_tokens: this.getModel().info.maxTokens,
max_tokens: this.getModel().info.maxTokens || 8192,
temperature: 0,
system: systemPrompt,
messages,