Add Road Trip Planner template with interactive map and trip management features

- Implemented a new HTML template for the Road Trip Planner.
- Integrated Leaflet.js for interactive mapping and routing.
- Added functionality for searching and selecting parks to include in a trip.
- Enabled drag-and-drop reordering of selected parks.
- Included trip optimization and route calculation features.
- Created a summary display for trip statistics.
- Added functionality to save trips and manage saved trips.
- Enhanced UI with responsive design and dark mode support.
This commit is contained in:
pacnpal
2025-08-15 20:53:00 -04:00
parent da7c7e3381
commit b5bae44cb8
99 changed files with 18697 additions and 4010 deletions

View File

@@ -0,0 +1,996 @@
#!/bin/bash
# ThrillWiki Complete Unraid Automation Setup
# This script automates the entire VM creation and deployment process on Unraid
#
# Usage:
# ./setup-complete-automation.sh # Standard setup
# ./setup-complete-automation.sh --reset # Delete VM and config, start completely fresh
# ./setup-complete-automation.sh --reset-vm # Delete VM only, keep configuration
# ./setup-complete-automation.sh --reset-config # Delete config only, keep VM
# Function to show help
# Print the CLI usage/help text and terminate the script with status 0.
# The unquoted heredoc delimiter lets $0 expand to the invoked script path.
show_help() {
cat << HELP
ThrillWiki CI/CD Automation Setup

Usage:
 $0 Set up or update ThrillWiki automation
 $0 --reset Delete VM and config, start completely fresh
 $0 --reset-vm Delete VM only, keep configuration
 $0 --reset-config Delete config only, keep VM
 $0 --help Show this help message

Reset Options:
 --reset Completely removes existing VM, disks, and config
 before starting fresh installation
 --reset-vm Removes only the VM and disks, preserves saved
 configuration to avoid re-entering settings
 --reset-config Removes only the saved configuration, preserves
 VM and prompts for fresh configuration input
 --help Display this help and exit

Examples:
 $0 # Normal setup/update
 $0 --reset # Complete fresh installation
 $0 --reset-vm # Fresh VM with saved settings
 $0 --reset-config # Re-configure existing VM
HELP
exit 0
}
# Check for help flag
# Early help handling before anything else runs; show_help exits the script.
if [[ "$1" == "--help" || "$1" == "-h" ]]; then
show_help
fi
# Parse reset flags
# NOTE(review): only $1 is inspected here, and main() re-parses the full
# argument list later with an equivalent case statement — keep both in sync.
RESET_ALL=false
RESET_VM_ONLY=false
RESET_CONFIG_ONLY=false
if [[ "$1" == "--reset" ]]; then
RESET_ALL=true
echo "🔄 COMPLETE RESET MODE: Will delete VM and configuration"
elif [[ "$1" == "--reset-vm" ]]; then
RESET_VM_ONLY=true
echo "🔄 VM RESET MODE: Will delete VM only, keep configuration"
elif [[ "$1" == "--reset-config" ]]; then
RESET_CONFIG_ONLY=true
echo "🔄 CONFIG RESET MODE: Will delete configuration only, keep VM"
fi
# Abort the script as soon as any command fails.
# NOTE(review): `pipefail` is not set, so the final `main "$@" | tee` pipeline
# reports tee's exit status rather than main's — confirm whether callers rely
# on this script's exit code.
set -e
# Colors for output
# ANSI escape sequences consumed by the log helpers below via `echo -e`.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Color-coded log helpers; each prints one tagged line to stdout.
# printf '%b' interprets the \033 escapes in the color variables, matching
# the original `echo -e` behavior.
log() {
printf '%b\n' "${BLUE}[AUTOMATION]${NC} $1"
}
log_success() {
printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Configuration
# Resolve the directory containing this script, then the project root two
# levels up (the script is expected to live in scripts/unraid/).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
LOG_DIR="$PROJECT_DIR/logs"
# Default values
# Offered when the operator accepts a prompt without typing a value.
DEFAULT_UNRAID_HOST=""
DEFAULT_VM_NAME="thrillwiki-vm"
DEFAULT_VM_MEMORY="4096"
DEFAULT_VM_VCPUS="2"
DEFAULT_VM_DISK_SIZE="50"
DEFAULT_WEBHOOK_PORT="9000"
# Configuration file
# Non-secret settings persist here so reruns can skip the prompts.
CONFIG_FILE="$PROJECT_DIR/.thrillwiki-config"
# Function to save configuration
# Persist the non-secret settings to $CONFIG_FILE so later runs can skip the
# interactive prompts. Secrets (Unraid password, GitHub token, webhook
# secret) are intentionally never written here.
save_config() {
log "Saving configuration to $CONFIG_FILE..."
cat > "$CONFIG_FILE" << EOF
# ThrillWiki Automation Configuration
# This file stores your settings to avoid re-entering them each time
# Unraid Server Configuration
UNRAID_HOST="$UNRAID_HOST"
UNRAID_USER="$UNRAID_USER"
VM_NAME="$VM_NAME"
VM_MEMORY="$VM_MEMORY"
VM_VCPUS="$VM_VCPUS"
VM_DISK_SIZE="$VM_DISK_SIZE"
# Network Configuration
VM_IP="$VM_IP"
VM_GATEWAY="$VM_GATEWAY"
VM_NETMASK="$VM_NETMASK"
VM_NETWORK="$VM_NETWORK"
# GitHub Configuration
REPO_URL="$REPO_URL"
GITHUB_USERNAME="$GITHUB_USERNAME"
GITHUB_API_ENABLED="$GITHUB_API_ENABLED"
GITHUB_AUTH_METHOD="$GITHUB_AUTH_METHOD"
# Webhook Configuration
WEBHOOK_PORT="$WEBHOOK_PORT"
WEBHOOK_ENABLED="$WEBHOOK_ENABLED"
# SSH Configuration (path to key, not the key content)
SSH_KEY_PATH="$HOME/.ssh/thrillwiki_vm"
EOF
# The file still identifies hosts, users, and repos — keep it owner-only.
chmod 600 "$CONFIG_FILE"
log_success "Configuration saved to $CONFIG_FILE"
}
# Function to load configuration
# Source the saved settings file into the current shell.
# Returns 0 when the file existed and was loaded, 1 when it is absent.
load_config() {
[ -f "$CONFIG_FILE" ] || return 1
log "Loading existing configuration from $CONFIG_FILE..."
# shellcheck disable=SC1090 — path is computed at runtime
source "$CONFIG_FILE"
}
# Function to prompt for configuration
# Collect every deployment setting interactively: Unraid host/credentials, VM
# sizing, GitHub auth (OAuth device flow, PAT, or none), webhook, and the VM
# network. Reuses a previously saved config when the operator confirms it.
# Secrets (Unraid password, GitHub token, webhook secret) are always prompted
# and never persisted by save_config.
prompt_unraid_config() {
log "=== Unraid VM Configuration ==="
echo
# Try to load existing config first
if load_config; then
log_success "Loaded existing configuration"
echo "Current settings:"
echo " Unraid Host: $UNRAID_HOST"
echo " VM Name: $VM_NAME"
echo " VM IP: $VM_IP"
echo " Repository: $REPO_URL"
echo
read -p "Use existing configuration? (y/n): " use_existing
if [ "$use_existing" = "y" ] || [ "$use_existing" = "Y" ]; then
# Still need to get sensitive info that we don't save.
# NOTE(review): prompt text reconstructed — the original line was redacted
# and left an unterminated quote (a syntax error).
read -s -p "Enter Unraid password: " UNRAID_PASSWORD
echo
# Handle GitHub authentication based on saved method
if [ -n "$GITHUB_USERNAME" ] && [ "$GITHUB_API_ENABLED" = "true" ]; then
if [ "$GITHUB_AUTH_METHOD" = "oauth" ]; then
# Check if OAuth token is still valid
if python3 "$SCRIPT_DIR/../github-auth.py" validate 2>/dev/null; then
GITHUB_TOKEN=$(python3 "$SCRIPT_DIR/../github-auth.py" token)
log "Using existing OAuth token"
else
log "OAuth token expired, re-authenticating..."
if python3 "$SCRIPT_DIR/../github-auth.py" login; then
GITHUB_TOKEN=$(python3 "$SCRIPT_DIR/../github-auth.py" token)
log_success "OAuth token refreshed"
else
log_error "OAuth re-authentication failed"
exit 1
fi
fi
else
# Personal access token method
read -s -p "Enter GitHub personal access token: " GITHUB_TOKEN
echo
fi
fi
if [ "$WEBHOOK_ENABLED" = "true" ]; then
read -s -p "Enter GitHub webhook secret: " WEBHOOK_SECRET
echo
fi
return 0
fi
fi
# Prompt for new configuration; save_config runs after each answer so a
# partially-completed session still leaves a usable config file.
read -p "Enter your Unraid server IP address: " UNRAID_HOST
save_config
read -p "Enter Unraid username (default: root): " UNRAID_USER
UNRAID_USER=${UNRAID_USER:-root}
save_config
# NOTE(review): prompt text reconstructed — original redacted (see above).
read -s -p "Enter Unraid password: " UNRAID_PASSWORD
echo
# Note: Password not saved for security
read -p "Enter VM name (default: $DEFAULT_VM_NAME): " VM_NAME
VM_NAME=${VM_NAME:-$DEFAULT_VM_NAME}
save_config
read -p "Enter VM memory in MB (default: $DEFAULT_VM_MEMORY): " VM_MEMORY
VM_MEMORY=${VM_MEMORY:-$DEFAULT_VM_MEMORY}
save_config
read -p "Enter VM vCPUs (default: $DEFAULT_VM_VCPUS): " VM_VCPUS
VM_VCPUS=${VM_VCPUS:-$DEFAULT_VM_VCPUS}
save_config
read -p "Enter VM disk size in GB (default: $DEFAULT_VM_DISK_SIZE): " VM_DISK_SIZE
VM_DISK_SIZE=${VM_DISK_SIZE:-$DEFAULT_VM_DISK_SIZE}
save_config
read -p "Enter GitHub repository URL: " REPO_URL
save_config
# GitHub API Configuration
echo
log "=== GitHub API Configuration ==="
echo "Choose GitHub authentication method:"
echo "1. OAuth Device Flow (recommended - secure, supports private repos)"
echo "2. Personal Access Token (manual token entry)"
echo "3. Skip (public repositories only)"
while true; do
read -p "Select option (1-3): " auth_choice
case $auth_choice in
1)
log "Using GitHub OAuth Device Flow..."
if python3 "$SCRIPT_DIR/../github-auth.py" validate 2>/dev/null; then
log "Existing GitHub authentication found and valid"
GITHUB_USERNAME=$(python3 "$SCRIPT_DIR/../github-auth.py" whoami 2>/dev/null | grep "You are authenticated as:" | cut -d: -f2 | xargs)
GITHUB_TOKEN=$(python3 "$SCRIPT_DIR/../github-auth.py" token)
else
log "Starting GitHub OAuth authentication..."
if python3 "$SCRIPT_DIR/../github-auth.py" login; then
GITHUB_USERNAME=$(python3 "$SCRIPT_DIR/../github-auth.py" whoami 2>/dev/null | grep "You are authenticated as:" | cut -d: -f2 | xargs)
GITHUB_TOKEN=$(python3 "$SCRIPT_DIR/../github-auth.py" token)
log_success "GitHub OAuth authentication completed"
else
log_error "GitHub authentication failed"
continue
fi
fi
GITHUB_API_ENABLED=true
GITHUB_AUTH_METHOD="oauth"
break
;;
2)
read -p "Enter GitHub username: " GITHUB_USERNAME
read -s -p "Enter GitHub personal access token: " GITHUB_TOKEN
echo
if [ -n "$GITHUB_USERNAME" ] && [ -n "$GITHUB_TOKEN" ]; then
GITHUB_API_ENABLED=true
GITHUB_AUTH_METHOD="token"
log "Personal access token configured"
else
log_error "Both username and token are required"
continue
fi
break
;;
3)
GITHUB_USERNAME=""
GITHUB_TOKEN=""
GITHUB_API_ENABLED=false
GITHUB_AUTH_METHOD="none"
log "Skipping GitHub API - using public access only"
break
;;
*)
echo "Invalid option. Please select 1, 2, or 3."
;;
esac
done
# Save GitHub configuration
save_config
log "GitHub authentication configuration saved"
# Webhook Configuration
echo
read -s -p "Enter GitHub webhook secret (optional, press Enter to skip): " WEBHOOK_SECRET
echo
# If no webhook secret provided, disable webhook functionality
if [ -z "$WEBHOOK_SECRET" ]; then
log "No webhook secret provided - webhook functionality will be disabled"
WEBHOOK_ENABLED=false
else
WEBHOOK_ENABLED=true
fi
read -p "Enter webhook port (default: $DEFAULT_WEBHOOK_PORT): " WEBHOOK_PORT
WEBHOOK_PORT=${WEBHOOK_PORT:-$DEFAULT_WEBHOOK_PORT}
# Save webhook configuration
save_config
log "Webhook configuration saved"
# Get VM IP address with proper range validation: the regex admits 10-100
# in the last octet, and the numeric check re-verifies the bounds.
while true; do
read -p "Enter VM IP address (192.168.20.10-192.168.20.100): " VM_IP
if [[ "$VM_IP" =~ ^192\.168\.20\.([1-9][0-9]|100)$ ]]; then
local ip_last_octet="${BASH_REMATCH[1]}"
if [ "$ip_last_octet" -ge 10 ] && [ "$ip_last_octet" -le 100 ]; then
break
fi
fi
echo "Invalid IP address. Please enter an IP in the range 192.168.20.10-192.168.20.100"
done
# Set network configuration (fixed /24 on the 192.168.20.x VLAN)
VM_GATEWAY="192.168.20.1"
VM_NETMASK="255.255.255.0"
VM_NETWORK="192.168.20.0/24"
# Save final network configuration
save_config
log "Network configuration saved - setup complete!"
}
# Generate SSH keys for VM access
# Generate (once) the SSH keypair used to reach the ThrillWiki VM, register a
# host alias in ~/.ssh/config, and export the public key for cloud-init.
# Sets/exports: SSH_PUBLIC_KEY.
setup_ssh_keys() {
log "Setting up SSH keys for VM access..."
local ssh_key_path="$HOME/.ssh/thrillwiki_vm"
local ssh_config_path="$HOME/.ssh/config"
# Ensure ~/.ssh exists before writing keys/config into it (fresh hosts
# may not have it, and ssh-keygen does not create parent directories).
mkdir -p "$HOME/.ssh"
if [ ! -f "$ssh_key_path" ]; then
ssh-keygen -t rsa -b 4096 -f "$ssh_key_path" -N "" -C "thrillwiki-vm-access"
log_success "SSH key generated: $ssh_key_path"
else
log "SSH key already exists: $ssh_key_path"
fi
# Add SSH config entry. "HostName %h" is a placeholder that wait_for_vm
# later rewrites with the VM's real IP via sed.
if ! grep -q "Host $VM_NAME" "$ssh_config_path" 2>/dev/null; then
cat >> "$ssh_config_path" << EOF
# ThrillWiki VM
Host $VM_NAME
HostName %h
User ubuntu
IdentityFile $ssh_key_path
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
EOF
# OpenSSH refuses config files that are writable by others.
chmod 600 "$ssh_config_path"
log_success "SSH config updated"
fi
# Store public key for VM setup
SSH_PUBLIC_KEY=$(cat "$ssh_key_path.pub")
export SSH_PUBLIC_KEY
}
# Setup Unraid host access
# Interactively establish passwordless SSH access to the Unraid host: create
# a dedicated keypair on first run, have the operator install the public key
# on the server, verify connectivity, then add a "unraid" alias to ~/.ssh/config.
# Exits 1 if the connection test fails.
setup_unraid_access() {
log "Setting up Unraid server access..."
local unraid_key_path="$HOME/.ssh/unraid_access"
if [ ! -f "$unraid_key_path" ]; then
ssh-keygen -t rsa -b 4096 -f "$unraid_key_path" -N "" -C "unraid-access"
log "Please add this public key to your Unraid server:"
echo "---"
cat "$unraid_key_path.pub"
echo "---"
echo
# NOTE(review): "***REMOVED***" below is a redaction artifact in the log
# message — presumably the original said authorized_keys; confirm.
log "Add this to /root/.ssh/***REMOVED*** on your Unraid server"
# Block until the operator confirms the key is installed server-side.
read -p "Press Enter when you've added the key..."
fi
# Test Unraid connection
log "Testing Unraid connection..."
if ssh -i "$unraid_key_path" -o ConnectTimeout=5 -o StrictHostKeyChecking=no "$UNRAID_USER@$UNRAID_HOST" "echo 'Connected to Unraid successfully'"; then
log_success "Unraid connection test passed"
else
log_error "Unraid connection test failed"
exit 1
fi
# Update SSH config for Unraid (append only once, keyed on "Host unraid")
if ! grep -q "Host unraid" "$HOME/.ssh/config" 2>/dev/null; then
cat >> "$HOME/.ssh/config" << EOF
# Unraid Server
Host unraid
HostName $UNRAID_HOST
User $UNRAID_USER
IdentityFile $unraid_key_path
StrictHostKeyChecking no
EOF
fi
}
# Create environment files
# Write the two generated env files: one for the Unraid VM manager and one
# for the webhook listener. Both contain secrets (Unraid password, GitHub
# token, webhook secret), so they are chmod'd to owner-only after writing.
create_environment_files() {
log "Creating environment configuration files..."
# Get SSH public key content safely (empty string if the key is missing)
local ssh_key_path="$HOME/.ssh/thrillwiki_vm.pub"
local ssh_public_key=""
if [ -f "$ssh_key_path" ]; then
ssh_public_key=$(cat "$ssh_key_path")
fi
# Unraid VM environment
cat > "$PROJECT_DIR/***REMOVED***.unraid" << EOF
# Unraid VM Configuration
UNRAID_HOST=$UNRAID_HOST
UNRAID_USER=$UNRAID_USER
UNRAID_PASSWORD=$UNRAID_PASSWORD
VM_NAME=$VM_NAME
VM_MEMORY=$VM_MEMORY
VM_VCPUS=$VM_VCPUS
VM_DISK_SIZE=$VM_DISK_SIZE
SSH_PUBLIC_KEY="$ssh_public_key"
# Network Configuration
VM_IP=$VM_IP
VM_GATEWAY=$VM_GATEWAY
VM_NETMASK=$VM_NETMASK
VM_NETWORK=$VM_NETWORK
# GitHub Configuration
REPO_URL=$REPO_URL
GITHUB_USERNAME=$GITHUB_USERNAME
GITHUB_TOKEN=$GITHUB_TOKEN
GITHUB_API_ENABLED=$GITHUB_API_ENABLED
EOF
# Webhook environment (updated with VM info)
cat > "$PROJECT_DIR/***REMOVED***.webhook" << EOF
# ThrillWiki Webhook Configuration
WEBHOOK_PORT=$WEBHOOK_PORT
WEBHOOK_SECRET=$WEBHOOK_SECRET
WEBHOOK_ENABLED=$WEBHOOK_ENABLED
VM_HOST=$VM_IP
VM_PORT=22
VM_USER=ubuntu
VM_KEY_PATH=$HOME/.ssh/thrillwiki_vm
VM_PROJECT_PATH=/home/ubuntu/thrillwiki
REPO_URL=$REPO_URL
DEPLOY_BRANCH=main
# GitHub API Configuration
GITHUB_USERNAME=$GITHUB_USERNAME
GITHUB_TOKEN=$GITHUB_TOKEN
GITHUB_API_ENABLED=$GITHUB_API_ENABLED
EOF
# Both files hold credentials — restrict them to the owner.
chmod 600 "$PROJECT_DIR/***REMOVED***.unraid" "$PROJECT_DIR/***REMOVED***.webhook"
log_success "Environment files created"
}
# Install required tools
# Verify/install host-side tooling: python3, ssh/scp, an ISO builder
# (genisoimage/mkisofs on Linux, hdiutil on macOS with optional cdrtools),
# and the UV Python package manager. Uses sudo for Linux package installs
# and exits 1 if no supported package manager is found.
install_dependencies() {
log "Installing required dependencies..."
# Check for required tools
local missing_tools=()
local mac_tools=()
command -v python3 >/dev/null 2>&1 || missing_tools+=("python3")
command -v ssh >/dev/null 2>&1 || missing_tools+=("openssh-client")
command -v scp >/dev/null 2>&1 || missing_tools+=("openssh-client")
# Check for ISO creation tools and handle platform differences
if ! command -v genisoimage >/dev/null 2>&1 && ! command -v mkisofs >/dev/null 2>&1 && ! command -v hdiutil >/dev/null 2>&1; then
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
missing_tools+=("genisoimage")
elif [[ "$OSTYPE" == "darwin"* ]]; then
# On macOS, hdiutil should be available, but add cdrtools as backup
if command -v brew >/dev/null 2>&1; then
mac_tools+=("cdrtools")
fi
fi
fi
# Install Linux packages
# Supports apt-get, yum, and dnf in that order of preference.
if [ ${#missing_tools[@]} -gt 0 ]; then
log "Installing missing tools for Linux: ${missing_tools[*]}"
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get update
sudo apt-get install -y "${missing_tools[@]}"
elif command -v yum >/dev/null 2>&1; then
sudo yum install -y "${missing_tools[@]}"
elif command -v dnf >/dev/null 2>&1; then
sudo dnf install -y "${missing_tools[@]}"
else
log_error "Linux package manager not found. Please install: ${missing_tools[*]}"
exit 1
fi
fi
# Install macOS packages
if [ ${#mac_tools[@]} -gt 0 ]; then
log "Installing additional tools for macOS: ${mac_tools[*]}"
if command -v brew >/dev/null 2>&1; then
brew install "${mac_tools[@]}"
else
log "Homebrew not found. Skipping optional tool installation."
log "Note: hdiutil should be available on macOS for ISO creation"
fi
fi
# Install Python dependencies
if [ -f "$PROJECT_DIR/pyproject.toml" ]; then
log "Installing Python dependencies with UV..."
if ! command -v uv >/dev/null 2>&1; then
curl -LsSf https://astral.sh/uv/install.sh | sh
# NOTE(review): assumes the uv installer drops an env file in ~/.cargo;
# newer installers use ~/.local/bin — confirm on the target host.
source ~/.cargo/env
fi
uv sync
fi
log_success "Dependencies installed"
}
# Create VM using the VM manager
# Create (or update) and boot the ThrillWiki VM on the Unraid host by driving
# scripts/unraid/vm-manager.py with settings from the generated env file.
# Exits 1 on either the setup or the start step failing.
create_vm() {
log "Creating VM on Unraid server..."
# Export all environment variables from the file
set -a # automatically export all variables
source "$PROJECT_DIR/***REMOVED***.unraid"
set +a # turn off automatic export
# Run VM creation/update
cd "$PROJECT_DIR"
# Test the commands directly: the previous `cmd; if [ $? -eq 0 ]` pattern
# never reached its else branch under `set -e`, because the script exited
# on the failing command before the status check ran.
if python3 scripts/unraid/vm-manager.py setup; then
log_success "VM created/updated successfully"
# Start the VM
log "Starting VM..."
if python3 scripts/unraid/vm-manager.py start; then
log_success "VM started successfully"
else
log_error "VM failed to start"
exit 1
fi
else
log_error "VM creation/update failed"
exit 1
fi
}
# Wait for VM to be ready and get IP
# Poll vm-manager.py until the VM reports an IP (up to 60 attempts, 30s
# apart, after an initial 2-minute boot grace period), then patch the SSH
# config and webhook env file with the discovered address. Exits 1 on timeout.
wait_for_vm() {
log "Waiting for VM to be ready..."
sleep 120
# Export all environment variables from the file
set -a # automatically export all variables
source "$PROJECT_DIR/***REMOVED***.unraid"
set +a # turn off automatic export
local max_attempts=60
local attempt=1
while [ $attempt -le $max_attempts ]; do
# `|| true` keeps a failed lookup (e.g. grep matching nothing under
# pipefail, or vm-manager erroring) from aborting the retry loop
# via `set -e`.
VM_IP=$(python3 scripts/unraid/vm-manager.py ip 2>/dev/null | grep "VM IP:" | cut -d' ' -f3) || true
if [ -n "$VM_IP" ]; then
log_success "VM is ready with IP: $VM_IP"
# Update SSH config with actual IP (replaces the %h placeholder
# written by setup_ssh_keys)
sed -i.bak "s/HostName %h/HostName $VM_IP/" "$HOME/.ssh/config"
# Update webhook environment with IP
sed -i.bak "s/VM_HOST=$VM_NAME/VM_HOST=$VM_IP/" "$PROJECT_DIR/***REMOVED***.webhook"
return 0
fi
log "Waiting for VM to get IP... (attempt $attempt/$max_attempts)"
sleep 30
((attempt++))
done
log_error "VM failed to get IP address"
exit 1
}
# Configure VM for ThrillWiki
# Bootstrap the freshly-installed VM for ThrillWiki: system packages,
# PostgreSQL database/user, repo clone, UV dependency sync, Django
# migrate/collectstatic, and the systemd unit. The setup script is generated
# locally, copied to the VM over scp, and executed there via ssh.
configure_vm() {
log "Configuring VM for ThrillWiki deployment..."
local vm_setup_script="/tmp/vm_thrillwiki_setup.sh"
# Create VM setup script
# Quoted 'EOF' delimiter: nothing below expands locally; the literal
# REPO_URL_PLACEHOLDER token is substituted afterwards with sed.
cat > "$vm_setup_script" << 'EOF'
#!/bin/bash
set -e
echo "Setting up VM for ThrillWiki..."
# Update system
sudo apt update && sudo apt upgrade -y
# Install required packages
sudo apt install -y git curl build-essential python3-pip lsof postgresql postgresql-contrib nginx
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
source ~/.cargo/env
# Configure PostgreSQL
sudo -u postgres psql << PSQL
CREATE DATABASE thrillwiki;
CREATE USER thrillwiki_user WITH ENCRYPTED PASSWORD 'thrillwiki_pass';
GRANT ALL PRIVILEGES ON DATABASE thrillwiki TO thrillwiki_user;
\q
PSQL
# Clone repository
git clone REPO_URL_PLACEHOLDER thrillwiki
cd thrillwiki
# Install dependencies
~/.cargo/bin/uv sync
# Create directories
mkdir -p logs backups
# Make scripts executable
chmod +x scripts/*.sh
# Run initial setup
~/.cargo/bin/uv run manage.py migrate
~/.cargo/bin/uv run manage.py collectstatic --noinput
# Install systemd services
sudo cp scripts/systemd/thrillwiki.service /etc/systemd/system/
sudo sed -i 's|/home/ubuntu|/home/ubuntu|g' /etc/systemd/system/thrillwiki.service
sudo systemctl daemon-reload
sudo systemctl enable thrillwiki.service
echo "VM setup completed!"
EOF
# NOTE(review): inside the remote script, the line
# `sudo sed -i 's|/home/ubuntu|/home/ubuntu|g' ...` replaces a path with
# itself (a no-op) — looks like a deploy-path placeholder that was never
# parameterized; confirm intent.
# Replace placeholder with actual repo URL
# NOTE(review): GNU-style `sed -i` without a suffix fails on BSD/macOS sed,
# although install_dependencies supports macOS — confirm the control host OS.
sed -i "s|REPO_URL_PLACEHOLDER|$REPO_URL|g" "$vm_setup_script"
# Copy and execute setup script on VM
scp "$vm_setup_script" "$VM_NAME:/tmp/"
ssh "$VM_NAME" "bash /tmp/vm_thrillwiki_setup.sh"
# Cleanup
rm "$vm_setup_script"
log_success "VM configured for ThrillWiki"
}
# Start services
# Launch the ThrillWiki systemd unit on the VM, verify it became active
# (exiting 1 otherwise), and print its status.
start_services() {
log "Starting ThrillWiki services..."
# Start VM service
ssh "$VM_NAME" "sudo systemctl start thrillwiki"
# Guard clause: bail out unless systemd reports the unit as active.
if ! ssh "$VM_NAME" "systemctl is-active --quiet thrillwiki"; then
log_error "Failed to start ThrillWiki service"
exit 1
fi
log_success "ThrillWiki service started successfully"
# Get service status
log "Service status:"
ssh "$VM_NAME" "systemctl status thrillwiki --no-pager -l"
}
# Setup webhook listener
# Generate the start-webhook.sh launcher in the project root and make it
# executable. The launcher cd's to its own directory, loads the webhook env
# file, and runs the listener.
setup_webhook_listener() {
log "Setting up webhook listener..."
local launcher="$PROJECT_DIR/start-webhook.sh"
# Quoted delimiter: the launcher resolves everything at its own run time.
cat > "$launcher" << 'EOF'
#!/bin/bash
cd "$(dirname "$0")"
source ***REMOVED***.webhook
python3 scripts/webhook-listener.py
EOF
chmod +x "$launcher"
log_success "Webhook listener configured"
log "You can start the webhook listener with: ./start-webhook.sh"
}
# Perform end-to-end test
# End-to-end smoke test: VM reachable over SSH, app answering on :8000, and
# the deploy script functional. A failed app check only warns and dumps the
# most recent service logs; an unreachable VM returns 1.
test_deployment() {
log "Performing end-to-end deployment test..."
# Test VM connectivity — the only hard failure in this function.
if ! ssh "$VM_NAME" "echo 'VM connectivity test passed'"; then
log_error "VM connectivity test failed"
return 1
fi
log_success "VM connectivity test passed"
# Test ThrillWiki service
if ssh "$VM_NAME" "curl -f http://localhost:8000 >/dev/null 2>&1"; then
log_success "ThrillWiki service test passed"
else
log_warning "ThrillWiki service test failed - checking logs..."
ssh "$VM_NAME" "journalctl -u thrillwiki --no-pager -l | tail -20"
fi
# Test deployment script
log "Testing deployment script..."
ssh "$VM_NAME" "cd thrillwiki && ./scripts/vm-deploy.sh status"
log_success "End-to-end test completed"
}
# Generate final instructions
# Write UNRAID_SETUP_COMPLETE.md in the project root: a post-setup summary
# with the VM's name/IP, webhook configuration steps, and management
# commands. The unquoted EOF delimiter expands $VM_NAME, $VM_IP, $REPO_URL
# and $WEBHOOK_PORT; backticks and \` sequences are escaped so the markdown
# keeps literal code spans.
generate_instructions() {
log "Generating final setup instructions..."
cat > "$PROJECT_DIR/UNRAID_SETUP_COMPLETE.md" << EOF
# ThrillWiki Unraid Automation - Setup Complete! 🎉
Your ThrillWiki CI/CD system has been fully automated and deployed!
## VM Information
- **VM Name**: $VM_NAME
- **VM IP**: $VM_IP
- **SSH Access**: \`ssh $VM_NAME\`
## Services Status
- **ThrillWiki Service**: Running on VM
- **Database**: PostgreSQL configured
- **Web Server**: Available at http://$VM_IP:8000
## Next Steps
### 1. Start Webhook Listener
\`\`\`bash
./start-webhook.sh
\`\`\`
### 2. Configure GitHub Webhook
- Go to your repository: $REPO_URL
- Settings → Webhooks → Add webhook
- **Payload URL**: http://YOUR_PUBLIC_IP:$WEBHOOK_PORT/webhook
- **Content type**: application/json
- **Secret**: (your webhook secret)
- **Events**: Just the push event
### 3. Test the System
\`\`\`bash
# Test VM connection
ssh $VM_NAME
# Test service status
ssh $VM_NAME "systemctl status thrillwiki"
# Test manual deployment
ssh $VM_NAME "cd thrillwiki && ./scripts/vm-deploy.sh"
# Make a test commit to trigger automatic deployment
git add .
git commit -m "Test automated deployment"
git push origin main
\`\`\`
## Management Commands
### VM Management
\`\`\`bash
# Check VM status
python3 scripts/unraid/vm-manager.py status
# Start/stop VM
python3 scripts/unraid/vm-manager.py start
python3 scripts/unraid/vm-manager.py stop
# Get VM IP
python3 scripts/unraid/vm-manager.py ip
\`\`\`
### Service Management on VM
\`\`\`bash
# Check service status
ssh $VM_NAME "./scripts/vm-deploy.sh status"
# Restart service
ssh $VM_NAME "./scripts/vm-deploy.sh restart"
# View logs
ssh $VM_NAME "journalctl -u thrillwiki -f"
\`\`\`
## Troubleshooting
### Common Issues
1. **VM not accessible**: Check VM is running and has IP
2. **Service not starting**: Check logs with \`journalctl -u thrillwiki\`
3. **Webhook not working**: Verify port $WEBHOOK_PORT is open
### Support Files
- Configuration: \`***REMOVED***.unraid\`, \`***REMOVED***.webhook\`
- Logs: \`logs/\` directory
- Documentation: \`docs/VM_DEPLOYMENT_SETUP.md\`
**Your automated CI/CD system is now ready!** 🚀
Every push to the main branch will automatically deploy to your VM.
EOF
log_success "Setup instructions saved to UNRAID_SETUP_COMPLETE.md"
}
# Main automation function
# Main automation function
# Orchestrates the whole flow: argument parsing, optional reset handling
# (full / VM-only / config-only), then configuration prompts and the setup
# pipeline (SSH keys → Unraid access → env files → deps → VM create/wait →
# configure → services → webhook → test → instructions).
main() {
log "🚀 Starting ThrillWiki Complete Unraid Automation"
# NOTE(review): the separator line below contains a redaction artifact
# ("[AWS-SECRET-REMOVED]") — presumably it was a row of '=' characters.
echo "[AWS-SECRET-REMOVED]=========="
echo
# Parse command line arguments
# NOTE(review): this duplicates the top-level $1 parsing that already ran
# before main() was called; the two parsers must stay in sync.
while [[ $# -gt 0 ]]; do
case $1 in
--reset)
RESET_ALL=true
shift
;;
--reset-vm)
RESET_VM_ONLY=true
shift
;;
--reset-config)
RESET_CONFIG_ONLY=true
shift
;;
--help|-h)
show_help
# Unreachable: show_help already exits 0 itself.
exit 0
;;
*)
echo "Unknown option: $1"
show_help
exit 1
;;
esac
done
# Create logs directory
mkdir -p "$LOG_DIR"
# Handle reset modes
if [[ "$RESET_ALL" == "true" ]]; then
log "🔄 Complete reset mode - deleting VM and configuration"
echo
# Load configuration first to get connection details for VM deletion
if [[ -f "$CONFIG_FILE" ]]; then
source "$CONFIG_FILE"
log_success "Loaded existing configuration for VM deletion"
else
log_warning "No configuration file found, will skip VM deletion"
fi
# Delete existing VM if config exists
if [[ -f "$CONFIG_FILE" ]]; then
log "🗑️ Deleting existing VM..."
# Export environment variables for VM manager
# (best effort — the env file may already be gone, hence `|| true`)
set -a
source "$PROJECT_DIR/***REMOVED***.unraid" 2>/dev/null || true
set +a
if python3 "$(dirname "$0")/vm-manager.py" delete; then
log_success "VM deleted successfully"
else
log "⚠️ VM deletion failed or VM didn't exist"
fi
fi
# Remove configuration files
if [[ -f "$CONFIG_FILE" ]]; then
rm "$CONFIG_FILE"
log_success "Configuration file removed"
fi
# Remove environment files
rm -f "$PROJECT_DIR/***REMOVED***.unraid" "$PROJECT_DIR/***REMOVED***.webhook"
log_success "Environment files removed"
log_success "Complete reset finished - continuing with fresh setup"
echo
elif [[ "$RESET_VM_ONLY" == "true" ]]; then
log "🔄 VM-only reset mode - deleting VM, preserving configuration"
echo
# Load configuration to get connection details
if [[ -f "$CONFIG_FILE" ]]; then
source "$CONFIG_FILE"
log_success "Loaded existing configuration"
else
log_error "No configuration file found. Cannot reset VM without connection details."
echo " Run the script without reset flags first to create initial configuration."
exit 1
fi
# Delete existing VM
log "🗑️ Deleting existing VM..."
# Export environment variables for VM manager
set -a
source "$PROJECT_DIR/***REMOVED***.unraid" 2>/dev/null || true
set +a
if python3 "$(dirname "$0")/vm-manager.py" delete; then
log_success "VM deleted successfully"
else
log "⚠️ VM deletion failed or VM didn't exist"
fi
# Remove only environment files, keep main config
rm -f "$PROJECT_DIR/***REMOVED***.unraid" "$PROJECT_DIR/***REMOVED***.webhook"
log_success "Environment files removed, configuration preserved"
log_success "VM reset complete - will recreate VM with saved configuration"
echo
elif [[ "$RESET_CONFIG_ONLY" == "true" ]]; then
log "🔄 Config-only reset mode - deleting configuration, preserving VM"
echo
# Remove configuration files
if [[ -f "$CONFIG_FILE" ]]; then
rm "$CONFIG_FILE"
log_success "Configuration file removed"
fi
# Remove environment files
rm -f "$PROJECT_DIR/***REMOVED***.unraid" "$PROJECT_DIR/***REMOVED***.webhook"
log_success "Environment files removed"
log_success "Configuration reset complete - will prompt for fresh configuration"
echo
fi
# Collect configuration
prompt_unraid_config
# Setup steps (order matters: env files feed create_vm/wait_for_vm, and
# configure_vm needs the SSH alias patched by wait_for_vm)
setup_ssh_keys
setup_unraid_access
create_environment_files
install_dependencies
create_vm
wait_for_vm
configure_vm
start_services
setup_webhook_listener
test_deployment
generate_instructions
echo
log_success "🎉 Complete automation setup finished!"
echo
log "Your ThrillWiki VM is running at: http://$VM_IP:8000"
log "Start the webhook listener: ./start-webhook.sh"
log "See UNRAID_SETUP_COMPLETE.md for detailed instructions"
echo
log "The system will now automatically deploy when you push to GitHub!"
}
# Run main function and log output
# NOTE(review): without `set -o pipefail`, this pipeline's exit status is
# tee's, so a failed run still exits 0 — confirm whether callers check it.
main "$@" 2>&1 | tee "$LOG_DIR/unraid-automation.log"

861
scripts/unraid/vm-manager.py Executable file
View File

@@ -0,0 +1,861 @@
#!/usr/bin/env python3
"""
Unraid VM Manager for ThrillWiki
This script automates VM creation, configuration, and management on Unraid.
"""
import os
import sys
import json
import time
import logging
import requests
import subprocess
from pathlib import Path
from typing import Dict, Optional, List
# Configuration — every setting is read from the environment so the shell
# wrapper (setup-complete-automation.sh) can drive this script by exporting
# its generated env file. Defaults make the script runnable standalone.
# NOTE: "os.environ" restored here — the source had the redaction artifact
# "os***REMOVED***iron", which is not valid Python.
UNRAID_HOST = os.environ.get('UNRAID_HOST', 'localhost')
UNRAID_USER = os.environ.get('UNRAID_USER', 'root')
UNRAID_PASSWORD = os.environ.get('UNRAID_PASSWORD', '')
VM_NAME = os.environ.get('VM_NAME', 'thrillwiki-vm')
VM_TEMPLATE = os.environ.get('VM_TEMPLATE', 'Ubuntu Server 22.04')
VM_MEMORY = int(os.environ.get('VM_MEMORY', 4096))  # MB
VM_VCPUS = int(os.environ.get('VM_VCPUS', 2))
VM_DISK_SIZE = int(os.environ.get('VM_DISK_SIZE', 50))  # GB
SSH_PUBLIC_KEY = os.environ.get('SSH_PUBLIC_KEY', '')
# Network Configuration
VM_IP = os.environ.get('VM_IP', '192.168.20.20')
VM_GATEWAY = os.environ.get('VM_GATEWAY', '192.168.20.1')
VM_NETMASK = os.environ.get('VM_NETMASK', '255.255.255.0')
VM_NETWORK = os.environ.get('VM_NETWORK', '192.168.20.0/24')
# GitHub Configuration
REPO_URL = os.environ.get('REPO_URL', '')
GITHUB_USERNAME = os.environ.get('GITHUB_USERNAME', '')
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN', '')
# The env file writes literal "true"/"false"; normalize to a bool here.
GITHUB_API_ENABLED = os.environ.get(
    'GITHUB_API_ENABLED', 'false').lower() == 'true'
# Setup logging: mirror messages to logs/unraid-vm.log (relative to CWD)
# and to the console.
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/unraid-vm.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)
class UnraidVMManager:
"""Manages VMs on Unraid server."""
def __init__(self):
    """Prepare an HTTP session and derive host URL / VM storage path.

    Relies on the module-level UNRAID_HOST and VM_NAME constants; the
    session is reused so cookies from authenticate() persist across calls.
    """
    self.session = requests.Session()
    # NOTE(review): plain http:// — credentials sent unencrypted; confirm
    # the Unraid webGui is not HTTPS-only on the target host.
    self.base_url = f"http://{UNRAID_HOST}"
    # Unraid's conventional libvirt storage location for this VM's disks.
    self.vm_config_path = f"/mnt/user/domains/{VM_NAME}"
def authenticate(self) -> bool:
    """Log in to the Unraid web UI with the configured credentials.

    Returns:
        True when the POST to /login returns HTTP 200, False on any
        non-200 status or exception (errors are logged, never raised).

    NOTE(review): only the status code is checked — a 200 response that
    re-renders the login form (failed credentials) would still count as
    success. Confirm against the Unraid webGui login behavior.
    """
    try:
        login_url = f"{self.base_url}/login"
        login_data = {
            'username': UNRAID_USER,
            'password': UNRAID_PASSWORD
        }
        response = self.session.post(login_url, data=login_data)
        if response.status_code == 200:
            logger.info("Successfully authenticated with Unraid")
            return True
        else:
            logger.error(f"Authentication failed: {response.status_code}")
            return False
    except Exception as e:
        logger.error(f"Authentication error: {e}")
        return False
def check_vm_exists(self) -> bool:
    """Return True when a libvirt domain named exactly VM_NAME exists.

    Runs `virsh list --all --name` on the Unraid host over SSH and compares
    whole lines. The previous implementation grepped the tabular `virsh
    list --all` output and used a substring test, so a VM whose name merely
    contained VM_NAME (e.g. "thrillwiki-vm2") was wrongly reported as a
    match.

    Returns:
        True on an exact name match; False otherwise or on any error
        (errors are logged, never raised).
    """
    try:
        result = subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh list --all --name'",
            shell=True,
            capture_output=True,
            text=True
        )
        # --name prints one domain name per line (plus a trailing blank).
        names = [line.strip() for line in result.stdout.splitlines()]
        return VM_NAME in names
    except Exception as e:
        logger.error(f"Error checking VM existence: {e}")
        return False
def create_vm_xml(self, existing_uuid: str = None) -> str:
    """Render the libvirt domain XML for the ThrillWiki VM.

    The template wires in the module-level sizing/network constants:
    memory (MB -> KiB via * 1024), vCPU topology, a qcow2 system disk, the
    Ubuntu installer ISO plus a per-VM cloud-init ISO, a virtio NIC on
    bridge br0.20 with a MAC derived from VM_IP's last octet, and VNC
    graphics.

    Args:
        existing_uuid: UUID to reuse when redefining an existing VM (keeps
            the NVRAM file name stable); a fresh UUID is generated when
            None.

    Returns:
        The complete domain XML as a string with surrounding whitespace
        stripped.
    """
    import uuid
    vm_uuid = existing_uuid if existing_uuid else str(uuid.uuid4())
    # NOTE(review): the <vmtemplate> metadata says name="Windows 10" while
    # icon/os say ubuntu — looks like a copied Unraid template; confirm.
    # NOTE(review): installer ISO path is hard-coded to
    # ubuntu-24.04.3-live-server-amd64.iso although VM_TEMPLATE defaults to
    # "Ubuntu Server 22.04" — confirm which image should be used.
    xml_template = f"""<?xml version='1.0' encoding='UTF-8'?>
<domain type='kvm'>
<name>{VM_NAME}</name>
<uuid>{vm_uuid}</uuid>
<metadata>
<vmtemplate xmlns="unraid" name="Windows 10" iconold="ubuntu.png" icon="ubuntu.png" os="linux" webui=""/>
</metadata>
<memory unit='KiB'>{VM_MEMORY * 1024}</memory>
<currentMemory unit='KiB'>{VM_MEMORY * 1024}</currentMemory>
<vcpu placement='static'>{VM_VCPUS}</vcpu>
<os>
<type arch='x86_64' machine='pc-q35-9.2'>hvm</type>
<loader readonly='yes' type='pflash' format='raw'>/usr/share/qemu/ovmf-x64/OVMF_CODE-pure-efi.fd</loader>
<nvram format='raw'>/etc/libvirt/qemu/nvram/{vm_uuid}_VARS-pure-efi.fd</nvram>
</os>
<features>
<acpi/>
<apic/>
<vmport state='off'/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'>
<topology sockets='1' dies='1' clusters='1' cores='{VM_VCPUS // 2 if VM_VCPUS > 1 else 1}' threads='{2 if VM_VCPUS > 1 else 1}'/>
<cache mode='passthrough'/>
<feature policy='require' name='topoext'/>
</cpu>
<clock offset='utc'>
<timer name='hpet' present='no'/>
<timer name='hypervclock' present='yes'/>
<timer name='pit' tickpolicy='delay'/>
<timer name='rtc' tickpolicy='catchup'/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<pm>
<suspend-to-mem enabled='no'/>
<suspend-to-disk enabled='no'/>
</pm>
<devices>
<emulator>/usr/local/sbin/qemu</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='writeback' discard='ignore'/>
<source file='/mnt/user/domains/{VM_NAME}/vdisk1.qcow2'/>
<target dev='hdc' bus='virtio'/>
<boot order='2'/>
<address type='pci' domain='0x0000' bus='0x02' slot='0x00' function='0x0'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/mnt/user/isos/ubuntu-24.04.3-live-server-amd64.iso'/>
<target dev='hda' bus='sata'/>
<readonly/>
<boot order='1'/>
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/mnt/user/isos/{VM_NAME}-cloud-init.iso'/>
<target dev='hdb' bus='sata'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='1'/>
</disk>
<controller type='usb' index='0' model='qemu-xhci' ports='15'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</controller>
<controller type='pci' index='0' model='pcie-root'/>
<controller type='pci' index='1' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='1' port='0x10'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0' multifunction='on'/>
</controller>
<controller type='pci' index='2' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='2' port='0x11'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x1'/>
</controller>
<controller type='pci' index='3' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='3' port='0x12'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x2'/>
</controller>
<controller type='pci' index='4' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='4' port='0x13'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x3'/>
</controller>
<controller type='pci' index='5' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='5' port='0x14'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x4'/>
</controller>
<controller type='virtio-serial' index='0'>
<address type='pci' domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
</controller>
<controller type='sata' index='0'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/>
</controller>
<interface type='bridge'>
<mac address='52:54:00:{":".join([f"{int(VM_IP.split('.')[3]):02x}", "7d", "fd"])}'/>
<source bridge='br0.20'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<channel type='unix'>
<target type='virtio' name='org.qemu.guest_agent.0'/>
<address type='virtio-serial' controller='0' bus='0' port='1'/>
</channel>
<input type='tablet' bus='usb'>
<address type='usb' bus='0' port='1'/>
</input>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes' websocket='-1' listen='0.0.0.0' sharePolicy='ignore'>
<listen type='address' address='0.0.0.0'/>
</graphics>
<audio id='1' type='none'/>
<video>
<model type='qxl' ram='65536' vram='65536' vram64='65535' vgamem='65536' heads='1' primary='yes'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x1e' function='0x0'/>
</video>
<watchdog model='itco' action='reset'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x0'/>
</memballoon>
</devices>
</domain>"""
    return xml_template.strip()
def create_vm(self) -> bool:
    """Create the VM on Unraid, or refresh its definition if it already exists.

    Flow: (1) stop a running VM before reconfiguring; (2) for a new VM,
    make the config directory and a qcow2 disk; (3) build the cloud-init
    seed ISO; (4) undefine any existing domain (preserving its UUID) and
    re-define it from freshly generated XML; (5) enable autostart.

    Returns:
        True on success, False on any failure.
    """
    try:
        vm_exists = self.check_vm_exists()
        if vm_exists:
            logger.info(
                f"VM {VM_NAME} already exists, updating configuration...")
            # Stop VM if running before updating its definition.
            if self.vm_status() == "running":
                logger.info(
                    f"Stopping VM {VM_NAME} for configuration update...")
                self.stop_vm()
                # Wait for VM to stop.
                # NOTE(review): `virsh shutdown` is only a request; a fixed
                # 5s sleep may not be enough for a slow guest — confirm.
                import time
                time.sleep(5)
        else:
            logger.info(f"Creating VM {VM_NAME}...")
            # Create VM directory on the Unraid host.
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'mkdir -p {self.vm_config_path}'",
                shell=True,
                check=True
            )
            # Create virtual disk only if VM doesn't exist (avoid clobbering data).
            disk_cmd = f"""
ssh {UNRAID_USER}@{UNRAID_HOST} 'qemu-img create -f qcow2 {self.vm_config_path}/vdisk1.qcow2 {VM_DISK_SIZE}G'
"""
            subprocess.run(disk_cmd, shell=True, check=True)
        # Create cloud-init ISO for automated installation and ThrillWiki deployment.
        logger.info(
            "Creating cloud-init ISO for automated Ubuntu and ThrillWiki setup...")
        if not self.create_cloud_init_iso(VM_IP):
            logger.error("Failed to create cloud-init ISO")
            return False
        existing_uuid = None
        if vm_exists:
            # Capture the existing VM UUID so the re-defined domain keeps it
            # (and its NVRAM file name stays valid).
            result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh dumpxml {VM_NAME} | grep \"<uuid>\" | sed \"s/<uuid>//g\" | sed \"s/<\\/uuid>//g\" | tr -d \" \"'",
                shell=True,
                capture_output=True,
                text=True
            )
            if result.returncode == 0 and result.stdout.strip():
                existing_uuid = result.stdout.strip()
                logger.info(f"Found existing VM UUID: {existing_uuid}")
            # Always undefine existing VM with NVRAM flag (since we create persistent VMs).
            logger.info(
                f"VM {VM_NAME} exists, undefining with NVRAM for reconfiguration...")
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh undefine {VM_NAME} --nvram'",
                shell=True,
                check=True
            )
            logger.info(
                f"VM {VM_NAME} undefined for reconfiguration (with NVRAM)")
        # Generate VM XML with appropriate UUID (None means a fresh UUID).
        vm_xml = self.create_vm_xml(existing_uuid)
        xml_file = f"/tmp/{VM_NAME}.xml"
        with open(xml_file, 'w') as f:
            f.write(vm_xml)
        # Copy XML to Unraid and define/redefine VM.
        subprocess.run(
            f"scp {xml_file} {UNRAID_USER}@{UNRAID_HOST}:/tmp/",
            shell=True,
            check=True
        )
        # Define VM as a persistent libvirt domain.
        subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh define /tmp/{VM_NAME}.xml'",
            shell=True,
            check=True
        )
        # Ensure VM is set to autostart for persistent configuration.
        subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh autostart {VM_NAME}'",
            shell=True,
            check=False  # Don't fail if autostart is already enabled
        )
        action = "updated" if vm_exists else "created"
        logger.info(f"VM {VM_NAME} {action} successfully")
        # Cleanup the locally generated XML file.
        os.remove(xml_file)
        return True
    except Exception as e:
        logger.error(f"Failed to create VM: {e}")
        return False
def create_nvram_file(self, vm_uuid: str) -> bool:
    """Ensure a per-VM UEFI NVRAM variable store exists on the Unraid host.

    When no file exists yet for this UUID, seed one by copying the stock
    OVMF variable-store template into libvirt's nvram directory.

    Args:
        vm_uuid: UUID of the VM; used to derive the NVRAM file name.

    Returns:
        True if the file already existed or was created, False otherwise.
    """
    try:
        nvram_path = f"/etc/libvirt/qemu/nvram/{vm_uuid}_VARS-pure-efi.fd"
        # `test -f` exits 0 when the file is already present — nothing to do.
        probe = subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'test -f {nvram_path}'",
            shell=True,
            capture_output=True
        )
        if probe.returncode == 0:
            logger.info(f"NVRAM file already exists: {nvram_path}")
            return True
        # Seed the variable store from the OVMF template shipped with Unraid.
        logger.info(f"Creating NVRAM file: {nvram_path}")
        copy_res = subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'cp /usr/share/qemu/ovmf-x64/OVMF_VARS-pure-efi.fd {nvram_path}'",
            shell=True,
            capture_output=True,
            text=True
        )
        if copy_res.returncode != 0:
            logger.error(f"Failed to create NVRAM file: {copy_res.stderr}")
            return False
        logger.info("NVRAM file created successfully")
        return True
    except Exception as e:
        logger.error(f"Error creating NVRAM file: {e}")
        return False
def start_vm(self) -> bool:
    """Boot the VM via `virsh start`, provisioning its UEFI NVRAM file first.

    Returns:
        True when the VM is running (either already, or after a successful
        start); False when it does not exist or the start fails.
    """
    try:
        # Nothing to do when the domain is already up.
        if self.vm_status() == "running":
            logger.info(f"VM {VM_NAME} is already running")
            return True
        logger.info(f"Starting VM {VM_NAME}...")
        # A VM that was never defined cannot be started.
        if not self.check_vm_exists():
            logger.error("Cannot start VM that doesn't exist")
            return False
        # Pull the UUID out of the domain XML; the NVRAM file is named after it.
        uuid_probe = subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh dumpxml {VM_NAME} | grep \"<uuid>\" | sed \"s/<uuid>//g\" | sed \"s/<\\/uuid>//g\" | tr -d \" \"'",
            shell=True,
            capture_output=True,
            text=True
        )
        domain_uuid = uuid_probe.stdout.strip()
        if uuid_probe.returncode == 0 and domain_uuid:
            logger.info(f"VM UUID: {domain_uuid}")
            # UEFI boot needs a per-VM variable store; bail out if it can't be made.
            if not self.create_nvram_file(domain_uuid):
                return False
        start_res = subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh start {VM_NAME}'",
            shell=True,
            capture_output=True,
            text=True
        )
        if start_res.returncode != 0:
            logger.error(f"Failed to start VM: {start_res.stderr}")
            return False
        logger.info(f"VM {VM_NAME} started successfully")
        return True
    except Exception as e:
        logger.error(f"Error starting VM: {e}")
        return False
def stop_vm(self) -> bool:
    """Request a graceful guest shutdown via `virsh shutdown`.

    Returns:
        True when the shutdown request was accepted (the guest powers off
        asynchronously), False when the command failed.
    """
    try:
        logger.info(f"Stopping VM {VM_NAME}...")
        outcome = subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh shutdown {VM_NAME}'",
            shell=True,
            capture_output=True,
            text=True
        )
        if outcome.returncode != 0:
            logger.error(f"Failed to stop VM: {outcome.stderr}")
            return False
        logger.info(f"VM {VM_NAME} stopped successfully")
        return True
    except Exception as e:
        logger.error(f"Error stopping VM: {e}")
        return False
def get_vm_ip(self) -> Optional[str]:
    """Poll `virsh domifaddr` until the VM reports an IPv4 address.

    Retries up to 30 times with a 10-second pause between attempts
    (roughly five minutes total), which covers guest boot plus a DHCP
    lease on the bridge.

    Returns:
        The dotted-quad IPv4 address (mask stripped), or None when no
        address appeared within the retry budget or an error occurred.
    """
    # Local import: sibling methods (create_vm, delete_vm) also import
    # time locally, suggesting it may not be imported at module level;
    # the original body used time.sleep without any visible import.
    import time
    try:
        # The command is loop-invariant; build it once.
        query = f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh domifaddr {VM_NAME}'"
        for attempt in range(30):
            result = subprocess.run(
                query,
                shell=True,
                capture_output=True,
                text=True
            )
            if result.returncode == 0 and 'ipv4' in result.stdout:
                for line in result.stdout.strip().split('\n'):
                    if 'ipv4' in line:
                        # Line shape: "vnet0 52:54:00:xx:xx:xx ipv4 192.168.1.100/24"
                        parts = line.split()
                        if len(parts) >= 4:
                            # Field 4 is "addr/prefix"; drop the prefix length.
                            ip = parts[3].split('/')[0]
                            logger.info(f"VM IP address: {ip}")
                            return ip
            logger.info(f"Waiting for VM IP... (attempt {attempt + 1}/30)")
            time.sleep(10)
        logger.error("Failed to get VM IP address")
        return None
    except Exception as e:
        logger.error(f"Error getting VM IP: {e}")
        return None
def create_cloud_init_iso(self, vm_ip: str) -> bool:
    """Build a cloud-init NoCloud seed ISO and copy it to the Unraid host.

    Writes user-data (an Ubuntu autoinstall document that stages a
    first-boot ThrillWiki deployment script and a systemd unit to run it)
    and meta-data into /tmp/cloud-init, packs them into an ISO with volume
    id "cidata", and scp's the result to /mnt/user/isos/ on Unraid.

    Args:
        vm_ip: Target VM IP address.
            NOTE(review): unused in this body — the generated network
            config uses DHCP; confirm whether this should feed a
            static-IP template.

    Reads environment variables:
        REPO_URL (required), GITHUB_TOKEN, SSH_PUBLIC_KEY.

    Returns:
        True on success, False on any failure.
    """
    try:
        logger.info("Creating cloud-init ISO...")
        # Gather deployment settings from the environment.
        repo_url = os.getenv('REPO_URL', '')
        github_token = os.getenv('GITHUB_TOKEN', '')
        # NOTE(review): github_token is read but never embedded in the
        # payload below; the deploy script reads /home/ubuntu/.github-token
        # at runtime instead — confirm this variable is still needed.
        ssh_public_key = os.getenv('SSH_PUBLIC_KEY', '')
        # Derive "owner/repo" from the clone URL.
        if repo_url:
            # Extract owner/repo from URL like https://github.com/owner/repo
            github_repo = repo_url.replace(
                'https://github.com/', '').replace('.git', '')
        else:
            logger.error("REPO_URL environment variable not set")
            return False
        # Cloud-init user-data with the complete ThrillWiki deployment payload.
        user_data = f"""#cloud-config
runcmd:
- [eval, 'echo $(cat /proc/cmdline) "autoinstall" > /root/cmdline']
- [eval, 'mount -n --bind -o ro /root/cmdline /proc/cmdline']
- [eval, 'snap restart subiquity.subiquity-server']
- [eval, 'snap restart subiquity.subiquity-service']
autoinstall:
version: 1
locale: en_US
keyboard:
layout: us
ssh:
install-server: true
authorized-keys:
- {ssh_public_key}
allow-pw: false
storage:
layout:
name: direct
identity:
hostname: thrillwiki-vm
username: ubuntu
password: '$6$rounds=4096$saltsalt$hash' # disabled
kernel:
package: linux-generic
early-commands:
- systemctl stop ssh
packages:
- curl
- git
- build-essential
- python3-pip
- postgresql
- postgresql-contrib
- nginx
- nodejs
- npm
- pipx
late-commands:
- apt install pipx -y
- echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > /target/etc/sudoers.d/ubuntu
- /target/usr/bin/pipx install uv
# Setup ThrillWiki deployment script
- |
cat > /target/home/ubuntu/deploy-thrillwiki.sh << 'DEPLOY_EOF'
#!/bin/bash
set -e
# Wait for system to be ready
sleep 30
# Clone ThrillWiki repository with GitHub token
export GITHUB_TOKEN=$(cat /home/ubuntu/.github-token 2>/dev/null || echo "")
if [ -n "$GITHUB_TOKEN" ]; then
git clone https://$GITHUB_TOKEN@github.com/{github_repo} /home/ubuntu/thrillwiki
else
git clone https://github.com/{github_repo} /home/ubuntu/thrillwiki
fi
cd /home/ubuntu/thrillwiki
# Setup UV and Python environment
export PATH="/home/ubuntu/.local/bin:$PATH"
uv venv
source .venv/bin/activate
# Install dependencies
uv sync
# Setup PostgreSQL
sudo -u postgres createuser ubuntu
sudo -u postgres createdb thrillwiki_production
sudo -u postgres psql -c "ALTER USER ubuntu WITH SUPERUSER;"
# Setup environment
cp ***REMOVED***.example ***REMOVED***
echo "DEBUG=False" >> ***REMOVED***
echo "DATABASE_URL=postgresql://ubuntu@localhost/thrillwiki_production" >> ***REMOVED***
echo "ALLOWED_HOSTS=*" >> ***REMOVED***
# Run migrations and collect static files
uv run manage.py migrate
uv run manage.py collectstatic --noinput
uv run manage.py tailwind build
# Setup systemd services
sudo cp [AWS-SECRET-REMOVED]thrillwiki.service /etc/systemd/system/
sudo cp [AWS-SECRET-REMOVED]thrillwiki-webhook.service /etc/systemd/system/
# Update service files with correct paths
sudo sed -i "s|/opt/thrillwiki|/home/ubuntu/thrillwiki|g" /etc/systemd/system/thrillwiki.service
sudo sed -i "s|/opt/thrillwiki|/home/ubuntu/thrillwiki|g" /etc/systemd/system/thrillwiki-webhook.service
# Enable and start services
sudo systemctl daemon-reload
sudo systemctl enable thrillwiki
sudo systemctl enable thrillwiki-webhook
sudo systemctl start thrillwiki
sudo systemctl start thrillwiki-webhook
echo "ThrillWiki deployment completed successfully!"
DEPLOY_EOF
- chmod +x /target/home/ubuntu/deploy-thrillwiki.sh
- chroot /target chown ubuntu:ubuntu /home/ubuntu/deploy-thrillwiki.sh
# Create systemd service to run deployment after first boot
- |
cat > /target/etc/systemd/system/thrillwiki-deploy.service << 'SERVICE_EOF'
[Unit]
Description=Deploy ThrillWiki on first boot
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
User=ubuntu
ExecStart=/home/ubuntu/deploy-thrillwiki.sh
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
SERVICE_EOF
- chroot /target systemctl enable thrillwiki-deploy
user-data:
disable_root: true
ssh_pwauth: false
power_state:
mode: reboot
"""
        meta_data = f"""instance-id: thrillwiki-vm-001
local-hostname: thrillwiki-vm
network:
version: 2
ethernets:
enp1s0:
dhcp4: true
"""
        # Stage the two NoCloud files in a temp directory.
        cloud_init_dir = "/tmp/cloud-init"
        os.makedirs(cloud_init_dir, exist_ok=True)
        with open(f"{cloud_init_dir}/user-data", 'w') as f:
            f.write(user_data)
        with open(f"{cloud_init_dir}/meta-data", 'w') as f:
            f.write(meta_data)
        # Pack the directory into an ISO.
        iso_path = f"/tmp/{VM_NAME}-cloud-init.iso"
        # Try different ISO creation tools (genisoimage, mkisofs, hdiutil).
        iso_created = False
        # NOTE(review): only FileNotFoundError is caught below; if a tool
        # is installed but exits non-zero, the CalledProcessError skips the
        # remaining fallbacks and fails the method — confirm intended.
        # Try genisoimage first
        try:
            subprocess.run([
                'genisoimage',
                '-output', iso_path,
                '-volid', 'cidata',  # volume id "cidata" marks a NoCloud seed
                '-joliet',
                '-rock',
                cloud_init_dir
            ], check=True)
            iso_created = True
        except FileNotFoundError:
            logger.warning("genisoimage not found, trying mkisofs...")
        # Try mkisofs as fallback
        if not iso_created:
            try:
                subprocess.run([
                    'mkisofs',
                    '-output', iso_path,
                    '-volid', 'cidata',
                    '-joliet',
                    '-rock',
                    cloud_init_dir
                ], check=True)
                iso_created = True
            except FileNotFoundError:
                logger.warning(
                    "mkisofs not found, trying hdiutil (macOS)...")
        # Try hdiutil for macOS
        if not iso_created:
            try:
                subprocess.run([
                    'hdiutil', 'makehybrid',
                    '-iso', '-joliet',
                    '-o', iso_path,
                    cloud_init_dir
                ], check=True)
                iso_created = True
            except FileNotFoundError:
                logger.error(
                    "No ISO creation tool found. Please install genisoimage, mkisofs, or use macOS hdiutil")
                return False
        if not iso_created:
            logger.error("Failed to create ISO with any available tool")
            return False
        # Copy the finished ISO to the Unraid isos share.
        subprocess.run(
            f"scp {iso_path} {UNRAID_USER}@{UNRAID_HOST}:/mnt/user/isos/",
            shell=True,
            check=True
        )
        logger.info("Cloud-init ISO created successfully")
        return True
    except Exception as e:
        logger.error(f"Failed to create cloud-init ISO: {e}")
        return False
def vm_status(self) -> str:
    """Report the libvirt domain state for the VM.

    Returns:
        The trimmed output of `virsh domstate` (e.g. "running"),
        "unknown" when the command exits non-zero, or "error" when the
        SSH invocation itself raises.
    """
    try:
        probe = subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh domstate {VM_NAME}'",
            shell=True,
            capture_output=True,
            text=True
        )
        return probe.stdout.strip() if probe.returncode == 0 else "unknown"
    except Exception as e:
        logger.error(f"Error getting VM status: {e}")
        return "error"
def delete_vm(self) -> bool:
    """Tear the VM down completely: undefine the domain and purge its files.

    Stops the VM if running, undefines it together with its NVRAM store,
    removes the VM's config directory, and best-effort deletes the
    cloud-init seed ISO from the Unraid isos share.

    Returns:
        True on success (including when the VM never existed), else False.
    """
    def run_remote(command: str, must_succeed: bool = True) -> None:
        # Helper: run a single command on the Unraid host over SSH.
        subprocess.run(
            f"ssh {UNRAID_USER}@{UNRAID_HOST} '{command}'",
            shell=True,
            check=must_succeed
        )

    try:
        logger.info(f"Deleting VM {VM_NAME} and all associated files...")
        if not self.check_vm_exists():
            logger.info(f"VM {VM_NAME} does not exist")
            return True
        # A running domain must be shut down before it can be undefined.
        if self.vm_status() == "running":
            logger.info(f"Stopping VM {VM_NAME}...")
            self.stop_vm()
            import time
            time.sleep(5)
        # Drop the domain definition together with its UEFI NVRAM store.
        logger.info(f"Undefining VM {VM_NAME}...")
        run_remote(f"virsh undefine {VM_NAME} --nvram")
        # Purge the VM directory (disk image, config) from the array.
        logger.info(f"Removing VM directory and files...")
        run_remote(f"rm -rf {self.vm_config_path}")
        # Best-effort removal of the cloud-init seed ISO; absence is fine.
        run_remote(
            f"rm -f /mnt/user/isos/{VM_NAME}-cloud-init.iso",
            must_succeed=False
        )
        logger.info(f"VM {VM_NAME} completely removed")
        return True
    except Exception as e:
        logger.error(f"Failed to delete VM: {e}")
        return False
def main():
    """CLI entry point: parse the requested action and dispatch to the VM manager."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Unraid VM Manager for ThrillWiki')
    parser.add_argument('action', choices=['create', 'start', 'stop', 'status', 'ip', 'setup', 'delete'],
                        help='Action to perform')
    args = parser.parse_args()
    # Make sure the log directory exists before any handler writes to it.
    os.makedirs('logs', exist_ok=True)
    manager = UnraidVMManager()
    action = args.action
    # Boolean-returning actions map 1:1 onto a manager method.
    simple_actions = {
        'create': manager.create_vm,
        'start': manager.start_vm,
        'stop': manager.stop_vm,
        'delete': manager.delete_vm,
    }
    if action in simple_actions:
        sys.exit(0 if simple_actions[action]() else 1)
    if action == 'status':
        status = manager.vm_status()
        print(f"VM Status: {status}")
        sys.exit(0)
    if action == 'ip':
        address = manager.get_vm_ip()
        if not address:
            print("Failed to get VM IP")
            sys.exit(1)
        print(f"VM IP: {address}")
        sys.exit(0)
    if action == 'setup':
        logger.info("Setting up complete VM environment...")
        # Create, boot, then wait for an address — abort on the first failure.
        if not manager.create_vm():
            sys.exit(1)
        if not manager.start_vm():
            sys.exit(1)
        vm_ip = manager.get_vm_ip()
        if not vm_ip:
            sys.exit(1)
        print(f"VM setup complete. IP: {vm_ip}")
        print("You can now connect via SSH and complete the ThrillWiki setup.")
        sys.exit(0)
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()