Merge branch 'RooVetGit:main' into main

Premshay (committed by GitHub) on 2024-12-12 09:23:19 +02:00
11 changed files with 419 additions and 50 deletions

.github/actions/ai-release-notes/action.yml

@@ -0,0 +1,86 @@
name: AI Release Notes
description: Generate AI release notes using git and OpenAI; outputs 'RELEASE_NOTES' and 'OPENAI_PROMPT'
inputs:
  OPENAI_API_KEY:
    required: true
    type: string
  GHA_PAT:
    required: true
    type: string
  model_name:
    required: false
    type: string
    default: gpt-4o-mini
  repo_path:
    required: false
    type: string
  custom_prompt:
    required: false
    default: ''
    type: string
  git_ref:
    required: false
    type: string
    default: ''
  head_ref:
    required: false
    type: string
    default: main
  base_ref:
    required: false
    type: string
    default: main
outputs:
  RELEASE_NOTES:
    description: "AI-generated release notes"
    value: ${{ steps.ai_release_notes.outputs.RELEASE_NOTES }}
  OPENAI_PROMPT:
    description: "Prompt used to generate release notes"
    value: ${{ steps.ai_prompt.outputs.OPENAI_PROMPT }}
env:
  GITHUB_REF: ${{ inputs.git_ref == '' && github.event.pull_request.head.ref || inputs.git_ref }}
  BASE_REF: ${{ inputs.base_ref == '' && github.base_ref || inputs.base_ref }}
  HEAD_REF: ${{ inputs.head_ref == '' && github.event.pull_request.head.sha || inputs.head_ref }}
runs:
  using: "composite"
  steps:
    - uses: actions/checkout@v4
      with:
        repository: ${{ inputs.repo_path }}
        token: ${{ inputs.GHA_PAT }}
        ref: ${{ env.GITHUB_REF }}
        fetch-depth: 0
    - name: Install Python Dependencies
      shell: bash
      run: |
        pip install tiktoken pytz
    # GitHub outputs: 'OPENAI_PROMPT'
    - name: Add Git Info to base prompt
      id: ai_prompt
      shell: bash
      env:
        BASE_REF: ${{ env.BASE_REF }}
        HEAD_SHA: ${{ env.HEAD_REF }} # HEAD_REF resolves to a SHA (see env above)
        PR_TITLE: ${{ github.event.pull_request.title }}
        PR_BODY: ${{ github.event.pull_request.body }}
        MODEL_NAME: ${{ inputs.model_name }}
        CUSTOM_PROMPT: ${{ inputs.custom_prompt }} # Default: ''
      run: python .github/scripts/release-notes-prompt.py
    # GitHub outputs: 'RELEASE_NOTES'
    - name: Generate AI release notes
      id: ai_release_notes
      shell: bash
      env:
        OPENAI_API_KEY: ${{ inputs.OPENAI_API_KEY }}
        CUSTOM_PROMPT: ${{ steps.ai_prompt.outputs.OPENAI_PROMPT }}
        MODEL_NAME: ${{ inputs.model_name }}
      run: python .github/scripts/ai-release-notes.py

.github/scripts/ai-release-notes.py

@@ -0,0 +1,123 @@
"""
AI-powered release notes generator that creates concise and informative release notes from git changes.
This script uses OpenAI's API to analyze git changes (summary, diff, and commit log) and generate
well-formatted release notes in markdown. It focuses on important changes and their impact,
particularly highlighting new types and schemas while avoiding repetitive information.
Environment Variables Required:
    OPENAI_API_KEY: OpenAI API key for authentication
    CHANGE_SUMMARY: Summary of changes made (optional if CUSTOM_PROMPT provided)
    CHANGE_DIFF: Git diff of changes (optional if CUSTOM_PROMPT provided)
    CHANGE_LOG: Git commit log (optional if CUSTOM_PROMPT provided)
    GITHUB_OUTPUT: Path to GitHub output file
    CUSTOM_PROMPT: Custom prompt to override default (optional)
    MODEL_NAME: OpenAI model to use (optional, defaults to 'gpt-3.5-turbo-16k')
"""
import os
import requests # type: ignore
import json
import tiktoken # type: ignore
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
CHANGE_SUMMARY = os.environ.get('CHANGE_SUMMARY', '')
CHANGE_DIFF = os.environ.get('CHANGE_DIFF', '')
CHANGE_LOG = os.environ.get('CHANGE_LOG', '')
GITHUB_OUTPUT = os.getenv("GITHUB_OUTPUT")
OPEN_AI_BASE_URL = "https://api.openai.com/v1"
OPEN_API_HEADERS = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
CUSTOM_PROMPT = os.environ.get('CUSTOM_PROMPT', '')
MODEL_NAME = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo-16k')
def num_tokens_from_string(string: str, model_name: str) -> int:
    """
    Calculate the number of tokens in a text string for a specific model.

    Args:
        string: The input text to count tokens for
        model_name: Name of the OpenAI model to use for token counting

    Returns:
        int: Number of tokens in the input string
    """
    encoding = tiktoken.encoding_for_model(model_name)
    num_tokens = len(encoding.encode(string))
    return num_tokens
def truncate_to_token_limit(text, max_tokens, model_name):
    """
    Truncate text to fit within a maximum token limit for a specific model.

    Args:
        text: The input text to truncate
        max_tokens: Maximum number of tokens allowed
        model_name: Name of the OpenAI model to use for tokenization

    Returns:
        str: Truncated text that fits within the token limit
    """
    encoding = tiktoken.encoding_for_model(model_name)
    encoded = encoding.encode(text)
    truncated = encoded[:max_tokens]
    return encoding.decode(truncated)
def generate_release_notes(model_name):
    """
    Generate release notes using OpenAI's API based on git changes.

    Uses the configured model to analyze the change summary, commit log, and code
    diff, and to generate concise and informative release notes in markdown format.
    The notes focus on important changes and their impact, with sections for new
    types/schemas and other updates.

    Args:
        model_name: Name of the OpenAI model to use for generation

    Returns:
        str: Generated release notes in markdown format

    Raises:
        requests.exceptions.RequestException: If the OpenAI API request fails
    """
    max_tokens = 14000  # Reserve some tokens for the response
    # Truncate inputs if necessary to fit within token limits
    change_summary = '' if CUSTOM_PROMPT else truncate_to_token_limit(CHANGE_SUMMARY, 1000, model_name)
    change_log = '' if CUSTOM_PROMPT else truncate_to_token_limit(CHANGE_LOG, 2000, model_name)
    change_diff = '' if CUSTOM_PROMPT else truncate_to_token_limit(
        CHANGE_DIFF,
        max_tokens
        - num_tokens_from_string(change_summary, model_name)
        - num_tokens_from_string(change_log, model_name)
        - 1000,
        model_name,
    )
    url = f"{OPEN_AI_BASE_URL}/chat/completions"
    # Construct prompt for OpenAI API
    openai_prompt = CUSTOM_PROMPT if CUSTOM_PROMPT else f"""Based on the following summary of changes, commit log and code diff, please generate concise and informative release notes:
Summary of changes:
{change_summary}
Commit log:
{change_log}
Code Diff:
{json.dumps(change_diff)}
"""
    data = {
        "model": model_name,
        "messages": [{"role": "user", "content": openai_prompt}],
        "temperature": 0.7,
        "max_tokens": 1000,
    }
    print("-" * 106)
    print("POST request to OpenAI")
    print("-" * 106)
    ai_response = requests.post(url, headers=OPEN_API_HEADERS, json=data)
    print(f"Status Code: {ai_response.status_code}")
    print(f"Response: {ai_response.text}")
    ai_response.raise_for_status()
    return ai_response.json()["choices"][0]["message"]["content"]
release_notes = generate_release_notes(MODEL_NAME)
print("----------------------------------------------------------------------------------------------------------")
print("OpenAI generated release notes")
print("----------------------------------------------------------------------------------------------------------")
print(release_notes)
# Write the release notes to GITHUB_OUTPUT
with open(GITHUB_OUTPUT, "a") as outputs_file:
    # Trailing newline ensures the EOF delimiter line is terminated.
    outputs_file.write(f"RELEASE_NOTES<<EOF\n{release_notes}\nEOF\n")
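
Both scripts budget tokens the same way: fixed allowances for the summary (1000) and the commit log (2000), with the remainder of the ~14,000-token window, minus roughly 1000 reserved for the model's reply, given to the diff. A minimal sketch of that arithmetic, reusing the helpers defined above; the sample strings are made up, and it assumes a tiktoken version that recognizes the model name:

model = "gpt-4o-mini"
summary = truncate_to_token_limit("src: 12 files changed", 1000, model)
log = truncate_to_token_limit("abc1234 - Fix diff editing (Jane Doe)", 2000, model)
# Whatever budget remains after the summary, the log, and the reserved reply goes to the diff.
diff_budget = 14000 - num_tokens_from_string(summary, model) - num_tokens_from_string(log, model) - 1000
diff = truncate_to_token_limit("diff --git a/src/app.ts b/src/app.ts ..." * 500, diff_budget, model)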

.github/scripts/release-notes-prompt.py

@@ -1,23 +1,125 @@
"""
This script generates a base prompt for OpenAI to create release notes.
"""
#!/usr/bin/env python3
import os import os
import subprocess
import json
import re
import tiktoken # type: ignore
from datetime import datetime; from datetime import datetime;
from pytz import timezone from pytz import timezone
GITHUB_OUTPUT = os.getenv("GITHUB_OUTPUT") GITHUB_OUTPUT = os.getenv("GITHUB_OUTPUT")
BASE_REF = os.getenv("BASE_REF", "main")
HEAD_SHA = os.environ["HEAD_SHA"]
PR_TITLE = os.environ["PR_TITLE"]
PR_BODY = os.environ["PR_BODY"]
EXISTING_NOTES = os.environ.get("EXISTING_NOTES", "null")
MODEL_NAME = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo-16k')
CUSTOM_PROMPT = os.environ.get('CUSTOM_PROMPT', '')
def extract_description_section(pr_body):
    # Find content between ## Description and the next ## or end of text
    description_match = re.search(r'## Description\s*\n(.*?)(?=\n##|$)', pr_body, re.DOTALL)
    if description_match:
        content = description_match.group(1).strip()
        # Remove the comment line if it exists
        comment_pattern = r'\[comment\]:.+?\n'
        content = re.sub(comment_pattern, '', content)
        return content.strip()
    return ""
def extract_ellipsis_important(pr_body):
    # Find content between <!-- ELLIPSIS_HIDDEN --> and <!-- ELLIPSIS_HIDDEN --> that contains [!IMPORTANT]
    ellipsis_match = re.search(r'<!--\s*ELLIPSIS_HIDDEN\s*-->(.*?)<!--\s*ELLIPSIS_HIDDEN\s*-->', pr_body, re.DOTALL)
    if ellipsis_match:
        content = ellipsis_match.group(1).strip()
        important_match = re.search(r'\[!IMPORTANT\](.*?)(?=\[!|$)', content, re.DOTALL)
        if important_match:
            important_text = important_match.group(1).strip()
            important_text = re.sub(r'^-+\s*', '', important_text)
            return important_text.strip()
    return ""
def extract_coderabbit_summary(pr_body):
    # Find content between ## Summary by CodeRabbit and the next ## or end of text
    summary_match = re.search(r'## Summary by CodeRabbit\s*\n(.*?)(?=\n##|$)', pr_body, re.DOTALL)
    return summary_match.group(1).strip() if summary_match else ""
def num_tokens_from_string(string: str, model_name: str) -> int:
    """
    Calculate the number of tokens in a text string for a specific model.

    Args:
        string: The input text to count tokens for
        model_name: Name of the OpenAI model to use for token counting

    Returns:
        int: Number of tokens in the input string
    """
    encoding = tiktoken.encoding_for_model(model_name)
    num_tokens = len(encoding.encode(string))
    return num_tokens

def truncate_to_token_limit(text, max_tokens, model_name):
    """
    Truncate text to fit within a maximum token limit for a specific model.

    Args:
        text: The input text to truncate
        max_tokens: Maximum number of tokens allowed
        model_name: Name of the OpenAI model to use for tokenization

    Returns:
        str: Truncated text that fits within the token limit
    """
    encoding = tiktoken.encoding_for_model(model_name)
    encoded = encoding.encode(text)
    truncated = encoded[:max_tokens]
    return encoding.decode(truncated)
# Extract sections and combine into PR_OVERVIEW
description = extract_description_section(PR_BODY)
important = extract_ellipsis_important(PR_BODY)
summary = extract_coderabbit_summary(PR_BODY)
PR_OVERVIEW = "\n\n".join(filter(None, [description, important, summary]))
# Get git information
base_sha = subprocess.getoutput(f"git rev-parse origin/{BASE_REF}") if BASE_REF == 'main' else BASE_REF
diff_overview = subprocess.getoutput(f"git diff {base_sha}..{HEAD_SHA} --name-status | awk '{{print $2}}' | sort | uniq -c | awk '{{print $2 \": \" $1 \" files changed\"}}'")
git_log = subprocess.getoutput(f"git log {base_sha}..{HEAD_SHA} --pretty=format:'%h - %s (%an)' --reverse | head -n 50")
git_diff = subprocess.getoutput(f"git diff {base_sha}..{HEAD_SHA} --minimal --abbrev --ignore-cr-at-eol --ignore-space-at-eol --ignore-space-change --ignore-all-space --ignore-blank-lines --unified=0 --diff-filter=ACDMRT")
max_tokens = 14000 # Reserve some tokens for the response
changes_summary = truncate_to_token_limit(diff_overview, 1000, MODEL_NAME)
git_logs = truncate_to_token_limit(git_log, 2000, MODEL_NAME)
changes_diff = truncate_to_token_limit(git_diff, max_tokens - num_tokens_from_string(changes_summary, MODEL_NAME) - num_tokens_from_string(git_logs, MODEL_NAME) - 1000, MODEL_NAME)
# Get today's existing changelog if any
existing_changelog = EXISTING_NOTES if EXISTING_NOTES != "null" else None
existing_changelog_text = f"\nAdditional context:\n{existing_changelog}" if existing_changelog else ""
TODAY = datetime.now(timezone('US/Eastern')).isoformat(sep=' ', timespec='seconds')
-BASE_PROMPT = f"""Based on the following 'PR Information', please generate concise and informative release notes to be read by developers.
+BASE_PROMPT = CUSTOM_PROMPT if CUSTOM_PROMPT else f"""Based on the following 'PR Information', please generate concise and informative release notes to be read by developers.
Format the release notes with markdown, and always use this structure: a descriptive and very short title (no more than 8 words) with heading level 2, a paragraph with a summary of changes (no header), and if applicable, sections for '🚀 New Features & Improvements', '🐛 Bugs Fixed' and '🔧 Other Updates', with heading level 3, skip respectively the sections if not applicable.
Finally include the following markdown comment with the PR merged date: <!-- PR_DATE: {TODAY} -->.
Avoid being repetitive and focus on the most important changes and their impact, discard any mention of version bumps/updates, changeset files, environment variables or syntax updates.
PR Information:"""
OPENAI_PROMPT = f"""{BASE_PROMPT}
Git log summary:
{changes_summary}
Commit Messages:
{git_logs}
PR Title:
{PR_TITLE}
PR Overview:
{PR_OVERVIEW}{existing_changelog_text}
Code Diff:
{json.dumps(changes_diff)}"""
print("OpenAI Prompt")
print("----------------------------------------------------------------")
print(OPENAI_PROMPT)
# Write the prompt to GITHUB_OUTPUT
with open(GITHUB_OUTPUT, "a") as outputs_file:
-    outputs_file.write(f"BASE_PROMPT<<EOF\n{BASE_PROMPT}\nEOF")
+    outputs_file.write(f"OPENAI_PROMPT<<EOF\n{OPENAI_PROMPT}\nEOF")
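
The three extractors above slice specific sections out of the PR body before the prompt is assembled. A quick sketch of what extract_description_section returns for a made-up PR body (the body text is purely illustrative):

sample_body = """## Description
[comment]: # (Describe your changes)
Adds drag-and-drop image support to the chat input.

## Checklist
- [x] Tests pass
"""
print(extract_description_section(sample_body))
# -> Adds drag-and-drop image support to the chat input.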


@@ -96,11 +96,6 @@ jobs:
          echo "version=$VERSION"
          echo "prev_version=$PREV_VERSION"
-     # Generate base prompt for OpenAI, GITHUB_OUTPUT: 'BASE_PROMPT'
-     - name: Release Notes Prompt
-       id: ai_prompt
-       run: python .github/scripts/release-notes-prompt.py
      # Get previous version refs, GITHUB_OUTPUT: 'BASE_REF' and 'HEAD_REF'
      - name: Get Previous Version Refs
        id: version_refs
@@ -109,7 +104,7 @@ jobs:
      # Generate release notes using OpenAI if not already edited, GITHUB_OUTPUT: 'RELEASE_NOTES' and 'OPENAI_PROMPT'
      - name: AI Release Notes
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'openai-edited') }}
-       uses: RooVetGit/Roo-GHA/.github/actions/ai-release-notes@main
+       uses: ./.github/actions/ai-release-notes
        id: ai_release_notes
        with:
          GHA_PAT: ${{ secrets.CROSS_REPO_ACCESS_TOKEN }}
@@ -118,7 +113,6 @@ jobs:
          repo_path: ${{ env.REPO_PATH }}
          base_ref: ${{ steps.version_refs.outputs.base_ref }}
          head_ref: ${{ steps.version_refs.outputs.head_ref }}
-         custom_prompt: ${{ steps.ai_prompt.outputs.BASE_PROMPT }}
      # Update CHANGELOG.md with AI-generated notes
      - name: Update Changeset Changelog
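
The outputs wired through these steps (OPENAI_PROMPT, RELEASE_NOTES) are multiline, so the scripts append them to the $GITHUB_OUTPUT file using GitHub's heredoc-style delimiter syntax. A small sketch of the write both scripts perform, with a made-up path standing in for $GITHUB_OUTPUT:

notes = "## Short Title\n\nSummary paragraph."
with open("/tmp/github_output_demo", "a") as f:  # the real scripts open os.environ["GITHUB_OUTPUT"]
    f.write(f"RELEASE_NOTES<<EOF\n{notes}\nEOF\n")
# The file now reads:
#   RELEASE_NOTES<<EOF
#   ## Short Title
#
#   Summary paragraph.
#   EOF
# and later steps consume it as steps.<id>.outputs.RELEASE_NOTES.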

.gitignore

@@ -6,6 +6,7 @@ node_modules
.DS_Store
# Builds
+bin
roo-cline-*.vsix
# Local prompts

CHANGELOG.md

@@ -1,80 +1,86 @@
# Roo Cline Changelog
+## 2.1.21
+### Patch Changes
+- 8dbd019: Larger Prompt Text Input
## [2.1.20]
- Add Gemini 2.0
## [2.1.19]
- Better error handling for diff editing
## [2.1.18]
- Diff editing bugfix to handle Windows line endings
## [2.1.17]
- Switch to search/replace diffs in experimental diff editing mode
## [2.1.16]
- Allow copying prompts from the history screen
## [2.1.15]
- Incorporate dbasclpy's [PR](https://github.com/RooVetGit/Roo-Cline/pull/54) to add support for gemini-exp-1206
- Make it clear that diff editing is very experimental
## [2.1.14]
- Fix bug where diffs were not being applied correctly and try Aider's [unified diff prompt](https://github.com/Aider-AI/aider/blob/3995accd0ca71cea90ef76d516837f8c2731b9fe/aider/coders/udiff_prompts.py#L75-L105)
- If diffs are enabled, automatically reject write_to_file commands that lead to truncated output
## [2.1.13]
- Fix https://github.com/RooVetGit/Roo-Cline/issues/50 where sound effects were not respecting settings
## [2.1.12]
- Incorporate JoziGila's [PR](https://github.com/cline/cline/pull/158) to add support for editing through diffs
## [2.1.11]
- Incorporate lloydchang's [PR](https://github.com/RooVetGit/Roo-Cline/pull/42) to add support for OpenRouter compression
## [2.1.10]
- Incorporate HeavenOSK's [PR](https://github.com/cline/cline/pull/818) to add sound effects to Cline
## [2.1.9]
- Add instructions for using .clinerules on the settings screen
## [2.1.8]
- Roo Cline now allows configuration of which commands are allowed without approval!
## [2.1.7]
- Updated extension icon and metadata
## [2.1.6]
- Roo Cline now runs in all VSCode-compatible editors
## [2.1.5]
- Fix bug in browser action approval
## [2.1.4]
- Roo Cline now can run side-by-side with Cline
## [2.1.3]
- Roo Cline now allows browser actions without approval when `alwaysAllowBrowser` is true
## [2.1.2]
- Support for auto-approval of write operations and command execution
- Support for .clinerules custom instructions

README.md

@@ -7,9 +7,9 @@ A fork of Cline, an autonomous coding agent, with some added experimental config
- Unit test coverage (written almost entirely by Roo Cline!)
- Support for playing sound effects
- Support for OpenRouter compression
-- Support for gemini-exp-1206
- Support for copying prompts from the history screen
- Support for editing through diffs / handling truncated full-file edits
+- Support for newer Gemini models (gemini-exp-1206 and gemini-2.0-flash-exp)
Here's an example of Roo-Cline autonomously creating a snake game with "Always approve write operations" and "Always approve browser actions" turned on:

package-lock.json

@@ -1,12 +1,12 @@
{
  "name": "roo-cline",
- "version": "2.1.20",
+ "version": "2.1.21",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "roo-cline",
-     "version": "2.1.20",
+     "version": "2.1.21",
      "dependencies": {
        "@anthropic-ai/bedrock-sdk": "^0.10.2",
        "@anthropic-ai/sdk": "^0.26.0",

package.json

@@ -3,7 +3,7 @@
"displayName": "Roo Cline", "displayName": "Roo Cline",
"description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.", "description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.",
"publisher": "RooVeterinaryInc", "publisher": "RooVeterinaryInc",
"version": "2.1.20", "version": "2.1.21",
"icon": "assets/icons/rocket.png", "icon": "assets/icons/rocket.png",
"galleryBanner": { "galleryBanner": {
"color": "#617A91", "color": "#617A91",
@@ -153,7 +153,7 @@
"publish": "npm run build && changeset publish && npm install --package-lock-only", "publish": "npm run build && changeset publish && npm install --package-lock-only",
"version-packages": "changeset version && npm install --package-lock-only", "version-packages": "changeset version && npm install --package-lock-only",
"vscode:prepublish": "npm run package", "vscode:prepublish": "npm run package",
"vsix": "npx vsce package --out bin", "vsix": "mkdir -p bin && npx vsce package --out bin",
"watch": "npm-run-all -p watch:*", "watch": "npm-run-all -p watch:*",
"watch:esbuild": "node esbuild.js --watch", "watch:esbuild": "node esbuild.js --watch",
"watch:tsc": "tsc --noEmit --watch --project tsconfig.json", "watch:tsc": "tsc --noEmit --watch --project tsconfig.json",
@@ -215,7 +215,7 @@
  },
  "lint-staged": {
    "src/**/*.{ts,tsx}": [
      "npx eslint -c .eslintrc.json"
    ]
  }
}

ChatTextArea.tsx

@@ -13,6 +13,8 @@ import { MAX_IMAGES_PER_MESSAGE } from "./ChatView"
import ContextMenu from "./ContextMenu"
import Thumbnails from "../common/Thumbnails"
+declare const vscode: any;
interface ChatTextAreaProps {
    inputValue: string
    setInputValue: (value: string) => void
@@ -427,7 +429,62 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
                    opacity: textAreaDisabled ? 0.5 : 1,
                    position: "relative",
                    display: "flex",
-               }}>
+               }}
+               onDrop={async (e) => {
+                   console.log("onDrop called")
+                   e.preventDefault()
+                   const files = Array.from(e.dataTransfer.files)
+                   const text = e.dataTransfer.getData("text")
+                   if (text) {
+                       const newValue =
+                           inputValue.slice(0, cursorPosition) + text + inputValue.slice(cursorPosition)
+                       setInputValue(newValue)
+                       const newCursorPosition = cursorPosition + text.length
+                       setCursorPosition(newCursorPosition)
+                       setIntendedCursorPosition(newCursorPosition)
+                       return
+                   }
+                   const acceptedTypes = ["png", "jpeg", "webp"]
+                   const imageFiles = files.filter((file) => {
+                       const [type, subtype] = file.type.split("/")
+                       return type === "image" && acceptedTypes.includes(subtype)
+                   })
+                   if (!shouldDisableImages && imageFiles.length > 0) {
+                       const imagePromises = imageFiles.map((file) => {
+                           return new Promise<string | null>((resolve) => {
+                               const reader = new FileReader()
+                               reader.onloadend = () => {
+                                   if (reader.error) {
+                                       console.error("Error reading file:", reader.error)
+                                       resolve(null)
+                                   } else {
+                                       const result = reader.result
+                                       console.log("File read successfully", result)
+                                       resolve(typeof result === "string" ? result : null)
+                                   }
+                               }
+                               reader.readAsDataURL(file)
+                           })
+                       })
+                       const imageDataArray = await Promise.all(imagePromises)
+                       const dataUrls = imageDataArray.filter((dataUrl): dataUrl is string => dataUrl !== null)
+                       if (dataUrls.length > 0) {
+                           setSelectedImages((prevImages) => [...prevImages, ...dataUrls].slice(0, MAX_IMAGES_PER_MESSAGE))
+                           if (typeof vscode !== 'undefined') {
+                               vscode.postMessage({
+                                   type: 'draggedImages',
+                                   dataUrls: dataUrls
+                               })
+                           }
+                       } else {
+                           console.warn("No valid images were processed")
+                       }
+                   }
+               }}
+               onDragOver={(e) => {
+                   e.preventDefault()
+               }}
+               >
                {showContextMenu && (
                    <div ref={contextMenuContainerRef}>
                        <ContextMenu
@@ -508,7 +565,8 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
                        onHeightChange?.(height)
                    }}
                    placeholder={placeholderText}
-                   maxRows={10}
+                   minRows={2}
+                   maxRows={20}
                    autoFocus={true}
                    style={{
                        width: "100%",
@@ -523,7 +581,6 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
                        resize: "none",
                        overflowX: "hidden",
                        overflowY: "scroll",
-                       scrollbarWidth: "none",
                        // Since we have maxRows, when text is long enough it starts to overflow the bottom padding, appearing behind the thumbnails. To fix this, we use a transparent border to push the text up instead. (https://stackoverflow.com/questions/42631947/maintaining-a-padding-inside-of-text-area/52538410#52538410)
                        // borderTop: "9px solid transparent",
                        borderLeft: 0,
@@ -560,11 +617,11 @@ const ChatTextArea = forwardRef<HTMLTextAreaElement, ChatTextAreaProps>(
                <div
                    style={{
                        position: "absolute",
-                       right: 23,
+                       right: 28,
                        display: "flex",
-                       alignItems: "flex-center",
+                       alignItems: "flex-end",
                        height: textAreaBaseHeight || 31,
-                       bottom: 9.5, // should be 10 but doesnt look good on mac
+                       bottom: 18,
                        zIndex: 2,
                    }}>
                    <div style={{ display: "flex", flexDirection: "row", alignItems: "center" }}>
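
The FileReader.readAsDataURL calls in the new onDrop handler yield base64 data URLs, which is what gets posted to the extension as 'draggedImages'. For reference, a minimal Python sketch of the same string format (the file name is hypothetical):

import base64
import mimetypes
import pathlib

path = pathlib.Path("screenshot.png")  # hypothetical input file
mime = mimetypes.guess_type(path.name)[0] or "application/octet-stream"
encoded = base64.b64encode(path.read_bytes()).decode("ascii")
data_url = f"data:{mime};base64,{encoded}"  # same shape FileReader.readAsDataURL produces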

ChatView.tsx

@@ -689,7 +689,7 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie
    useEvent("wheel", handleWheel, window, { passive: true }) // passive improves scrolling performance
    const placeholderText = useMemo(() => {
-       const text = task ? "Type a message (@ to add context)..." : "Type your task here (@ to add context)..."
+       const text = task ? "Type a message...\n(@ to add context, hold shift to drag in images)" : "Type your task here...\n(@ to add context, hold shift to drag in images)"
        return text
    }, [task])