#!/usr/bin/env python3
"""
Unraid VM Manager for ThrillWiki
This script automates VM creation, configuration, and management on Unraid.
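Usage:
  python3 <this script> {create,start,stop,status,ip,setup,delete}
  e.g. `setup` creates the VM, starts it, and waits for it to report an IP.
Passwordless SSH from this machine to the Unraid host is required; all other
settings come from the environment variables below.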
"""
import os
import sys
import json
import time
import logging
import requests
import subprocess
from pathlib import Path
from typing import Dict, Optional, List
# Configuration
UNRAID_HOST = os.environ.get('UNRAID_HOST', 'localhost')
UNRAID_USER = os.environ.get('UNRAID_USER', 'root')
UNRAID_PASSWORD = os.environ.get('UNRAID_PASSWORD', '')
VM_NAME = os.environ.get('VM_NAME', 'thrillwiki-vm')
VM_TEMPLATE = os.environ.get('VM_TEMPLATE', 'Ubuntu Server 22.04')
VM_MEMORY = int(os.environ.get('VM_MEMORY', 4096))  # MB
VM_VCPUS = int(os.environ.get('VM_VCPUS', 2))
VM_DISK_SIZE = int(os.environ.get('VM_DISK_SIZE', 50))  # GB
SSH_PUBLIC_KEY = os.environ.get('SSH_PUBLIC_KEY', '')
# Network Configuration
VM_IP = os.environ.get('VM_IP', '192.168.20.20')
VM_GATEWAY = os.environ.get('VM_GATEWAY', '192.168.20.1')
VM_NETMASK = os.environ.get('VM_NETMASK', '255.255.255.0')
VM_NETWORK = os.environ.get('VM_NETWORK', '192.168.20.0/24')
# GitHub Configuration
REPO_URL = os.environ.get('REPO_URL', '')
GITHUB_USERNAME = os.environ.get('GITHUB_USERNAME', '')
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN', '')
GITHUB_API_ENABLED = os.environ.get(
'GITHUB_API_ENABLED', 'false').lower() == 'true'
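# Example shell configuration (illustrative values only):
#   export UNRAID_HOST=192.168.20.5
#   export SSH_PUBLIC_KEY="ssh-ed25519 AAAA... user@host"
#   export REPO_URL=https://github.com/<owner>/<repo>
#   export GITHUB_TOKEN=<token with read access to the repo>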
# Setup logging
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('logs/unraid-vm.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
class UnraidVMManager:
"""Manages VMs on Unraid server."""
def __init__(self):
self.session = requests.Session()
self.base_url = f"http://{UNRAID_HOST}"
self.vm_config_path = f"/mnt/user/domains/{VM_NAME}"
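# The methods below shell out to `ssh`/`virsh` inline for each operation; a
# helper like this sketch could centralize that pattern (it is illustrative
# and not called by the existing methods):
def _run_remote(self, command: str) -> subprocess.CompletedProcess:
"""Run a command on the Unraid host over SSH and capture its output."""
return subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} '{command}'",
shell=True,
capture_output=True,
text=True
)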
def authenticate(self) -> bool:
"""Authenticate with Unraid server."""
try:
login_url = f"{self.base_url}/login"
login_data = {
'username': UNRAID_USER,
'password': UNRAID_PASSWORD
}
response = self.session.post(login_url, data=login_data)
if response.status_code == 200:
logger.info("Successfully authenticated with Unraid")
return True
else:
logger.error(f"Authentication failed: {response.status_code}")
return False
except Exception as e:
logger.error(f"Authentication error: {e}")
return False
def check_vm_exists(self) -> bool:
"""Check if VM already exists."""
try:
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh list --all | grep {VM_NAME}'",
shell=True,
capture_output=True,
text=True
)
return VM_NAME in result.stdout
except Exception as e:
logger.error(f"Error checking VM existence: {e}")
return False
def create_vm_xml(self, existing_uuid: Optional[str] = None) -> str:
"""Generate the libvirt domain XML (device paths mirror create_vm(); bridge br0 assumed; memory in KiB)."""
import uuid
vm_uuid = existing_uuid if existing_uuid else str(uuid.uuid4())
xml_template = f"""<domain type='kvm'>
  <name>{VM_NAME}</name>
  <uuid>{vm_uuid}</uuid>
  <memory unit='KiB'>{VM_MEMORY * 1024}</memory>
  <currentMemory unit='KiB'>{VM_MEMORY * 1024}</currentMemory>
  <vcpu>{VM_VCPUS}</vcpu>
  <os>
    <type arch='x86_64'>hvm</type>
    <loader readonly='yes' type='pflash'>/usr/share/qemu/ovmf-x64/OVMF_CODE-pure-efi.fd</loader>
    <nvram>/etc/libvirt/qemu/nvram/{vm_uuid}_VARS-pure-efi.fd</nvram>
  </os>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/local/sbin/qemu</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source file='{self.vm_config_path}/vdisk1.qcow2'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <disk type='file' device='cdrom'>
      <source file='/mnt/user/isos/{VM_NAME}-cloud-init.iso'/>
      <target dev='hda' bus='ide'/>
      <readonly/>
    </disk>
    <interface type='bridge'>
      <source bridge='br0'/>
      <model type='virtio'/>
    </interface>
  </devices>
</domain>"""
return xml_template.strip()
def create_vm(self) -> bool:
"""Create or update the VM on Unraid."""
try:
vm_exists = self.check_vm_exists()
if vm_exists:
logger.info(
f"VM {VM_NAME} already exists, updating configuration...")
# Stop VM if running before updating
if self.vm_status() == "running":
logger.info(
f"Stopping VM {VM_NAME} for configuration update...")
self.stop_vm()
# Wait for VM to stop
import time
time.sleep(5)
else:
logger.info(f"Creating VM {VM_NAME}...")
# Create VM directory
subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'mkdir -p {self.vm_config_path}'",
shell=True,
check=True
)
# Create virtual disk only if VM doesn't exist
disk_cmd = f"""
ssh {UNRAID_USER}@{UNRAID_HOST} 'qemu-img create -f qcow2 {self.vm_config_path}/vdisk1.qcow2 {VM_DISK_SIZE}G'
"""
subprocess.run(disk_cmd, shell=True, check=True)
# Create cloud-init ISO for automated installation and ThrillWiki deployment
logger.info(
"Creating cloud-init ISO for automated Ubuntu and ThrillWiki setup...")
if not self.create_cloud_init_iso(VM_IP):
logger.error("Failed to create cloud-init ISO")
return False
existing_uuid = None
if vm_exists:
# Get existing VM UUID
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh dumpxml {VM_NAME} | grep \"\" | sed \"s///g\" | sed \"s/<\\/uuid>//g\" | tr -d \" \"'",
shell=True,
capture_output=True,
text=True
)
if result.returncode == 0 and result.stdout.strip():
existing_uuid = result.stdout.strip()
logger.info(f"Found existing VM UUID: {existing_uuid}")
# Always undefine existing VM with NVRAM flag (since we create persistent VMs)
logger.info(
f"VM {VM_NAME} exists, undefining with NVRAM for reconfiguration...")
subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh undefine {VM_NAME} --nvram'",
shell=True,
check=True
)
logger.info(
f"VM {VM_NAME} undefined for reconfiguration (with NVRAM)")
# Generate VM XML with appropriate UUID
vm_xml = self.create_vm_xml(existing_uuid)
xml_file = f"/tmp/{VM_NAME}.xml"
with open(xml_file, 'w') as f:
f.write(vm_xml)
# Copy XML to Unraid and define/redefine VM
subprocess.run(
f"scp {xml_file} {UNRAID_USER}@{UNRAID_HOST}:/tmp/",
shell=True,
check=True
)
# Define VM as persistent domain
subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh define /tmp/{VM_NAME}.xml'",
shell=True,
check=True
)
# Ensure VM is set to autostart for persistent configuration
subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh autostart {VM_NAME}'",
shell=True,
check=False # Don't fail if autostart is already enabled
)
action = "updated" if vm_exists else "created"
logger.info(f"VM {VM_NAME} {action} successfully")
# Cleanup
os.remove(xml_file)
return True
except Exception as e:
logger.error(f"Failed to create VM: {e}")
return False
def create_nvram_file(self, vm_uuid: str) -> bool:
"""Create NVRAM file for UEFI VM."""
try:
nvram_path = f"/etc/libvirt/qemu/nvram/{vm_uuid}_VARS-pure-efi.fd"
# Check if NVRAM file already exists
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'test -f {nvram_path}'",
shell=True,
capture_output=True
)
if result.returncode == 0:
logger.info(f"NVRAM file already exists: {nvram_path}")
return True
# Copy template to create NVRAM file
logger.info(f"Creating NVRAM file: {nvram_path}")
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'cp /usr/share/qemu/ovmf-x64/OVMF_VARS-pure-efi.fd {nvram_path}'",
shell=True,
capture_output=True,
text=True
)
if result.returncode == 0:
logger.info("NVRAM file created successfully")
return True
else:
logger.error(f"Failed to create NVRAM file: {result.stderr}")
return False
except Exception as e:
logger.error(f"Error creating NVRAM file: {e}")
return False
def start_vm(self) -> bool:
"""Start the VM if it's not already running."""
try:
# Check if VM is already running
current_status = self.vm_status()
if current_status == "running":
logger.info(f"VM {VM_NAME} is already running")
return True
logger.info(f"Starting VM {VM_NAME}...")
# The VM must already be defined; look up its UUID so the matching NVRAM file can be created before first boot
vm_exists = self.check_vm_exists()
if not vm_exists:
logger.error("Cannot start VM that doesn't exist")
return False
# Get VM UUID from XML
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh dumpxml {VM_NAME} | grep \"\" | sed \"s///g\" | sed \"s/<\\/uuid>//g\" | tr -d \" \"'",
shell=True,
capture_output=True,
text=True
)
if result.returncode == 0 and result.stdout.strip():
vm_uuid = result.stdout.strip()
logger.info(f"VM UUID: {vm_uuid}")
# Create NVRAM file if it doesn't exist
if not self.create_nvram_file(vm_uuid):
return False
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh start {VM_NAME}'",
shell=True,
capture_output=True,
text=True
)
if result.returncode == 0:
logger.info(f"VM {VM_NAME} started successfully")
return True
else:
logger.error(f"Failed to start VM: {result.stderr}")
return False
except Exception as e:
logger.error(f"Error starting VM: {e}")
return False
def stop_vm(self) -> bool:
"""Stop the VM."""
try:
logger.info(f"Stopping VM {VM_NAME}...")
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh shutdown {VM_NAME}'",
shell=True,
capture_output=True,
text=True
)
if result.returncode == 0:
logger.info(f"VM {VM_NAME} stopped successfully")
return True
else:
logger.error(f"Failed to stop VM: {result.stderr}")
return False
except Exception as e:
logger.error(f"Error stopping VM: {e}")
return False
def get_vm_ip(self) -> Optional[str]:
"""Get VM IP address."""
try:
# Wait for VM to get IP
for attempt in range(30):
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh domifaddr {VM_NAME}'",
shell=True,
capture_output=True,
text=True
)
if result.returncode == 0 and 'ipv4' in result.stdout:
lines = result.stdout.strip().split('\n')
for line in lines:
if 'ipv4' in line:
# Extract IP from line like: vnet0 52:54:00:xx:xx:xx ipv4 192.168.1.100/24
parts = line.split()
if len(parts) >= 4:
ip_with_mask = parts[3]
ip = ip_with_mask.split('/')[0]
logger.info(f"VM IP address: {ip}")
return ip
logger.info(f"Waiting for VM IP... (attempt {attempt + 1}/30)")
time.sleep(10)
logger.error("Failed to get VM IP address")
return None
except Exception as e:
logger.error(f"Error getting VM IP: {e}")
return None
def create_cloud_init_iso(self, vm_ip: str) -> bool:
"""Create cloud-init ISO for automated Ubuntu installation."""
try:
logger.info("Creating cloud-init ISO...")
# Get environment variables
repo_url = os.getenv('REPO_URL', '')
github_token = os.getenv('GITHUB_TOKEN', '')
ssh_public_key = os.getenv('SSH_PUBLIC_KEY', '')
# Extract repository name from URL
if repo_url:
# Extract owner/repo from URL like https://github.com/owner/repo
github_repo = repo_url.replace(
'https://github.com/', '').replace('.git', '')
else:
logger.error("REPO_URL environment variable not set")
return False
# Create cloud-init user-data with complete ThrillWiki deployment
user_data = f"""#cloud-config
runcmd:
- [eval, 'echo $(cat /proc/cmdline) "autoinstall" > /root/cmdline']
- [eval, 'mount -n --bind -o ro /root/cmdline /proc/cmdline']
- [eval, 'snap restart subiquity.subiquity-server']
- [eval, 'snap restart subiquity.subiquity-service']
autoinstall:
version: 1
locale: en_US
keyboard:
layout: us
ssh:
install-server: true
authorized-keys:
- {ssh_public_key}
allow-pw: false
storage:
layout:
name: direct
identity:
hostname: thrillwiki-vm
username: ubuntu
password: '$6$rounds=4096$saltsalt$hash' # disabled
kernel:
package: linux-generic
early-commands:
- systemctl stop ssh
packages:
- curl
- git
- build-essential
- python3-pip
- postgresql
- postgresql-contrib
- nginx
- nodejs
- npm
- pipx
late-commands:
- apt install pipx -y
- echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > /target/etc/sudoers.d/ubuntu
- /target/usr/bin/pipx install uv
# Setup ThrillWiki deployment script
- |
cat > /target/home/ubuntu/deploy-thrillwiki.sh << 'DEPLOY_EOF'
#!/bin/bash
set -e
# Wait for system to be ready
sleep 30
# Clone ThrillWiki repository with GitHub token
export GITHUB_TOKEN=$(cat /home/ubuntu/.github-token 2>/dev/null || echo "")
if [ -n "$GITHUB_TOKEN" ]; then
git clone https://$GITHUB_TOKEN@github.com/{github_repo} /home/ubuntu/thrillwiki
else
git clone https://github.com/{github_repo} /home/ubuntu/thrillwiki
fi
cd /home/ubuntu/thrillwiki
# Setup UV and Python environment
export PATH="/home/ubuntu/.local/bin:$PATH"
uv venv
source .venv/bin/activate
# Install dependencies
uv sync
# Setup PostgreSQL
sudo -u postgres createuser ubuntu
sudo -u postgres createdb thrillwiki_production
sudo -u postgres psql -c "ALTER USER ubuntu WITH SUPERUSER;"
# Setup environment
cp .env.example .env
echo "DEBUG=False" >> .env
echo "DATABASE_URL=postgresql://ubuntu@localhost/thrillwiki_production" >> .env
echo "ALLOWED_HOSTS=*" >> .env
# Run migrations and collect static files
uv run manage.py migrate
uv run manage.py collectstatic --noinput
uv run manage.py tailwind build
# Setup systemd services
sudo cp [AWS-SECRET-REMOVED]thrillwiki.service /etc/systemd/system/
sudo cp [AWS-SECRET-REMOVED]thrillwiki-webhook.service /etc/systemd/system/
# Update service files with correct paths
sudo sed -i "s|/opt/thrillwiki|/home/ubuntu/thrillwiki|g" /etc/systemd/system/thrillwiki.service
sudo sed -i "s|/opt/thrillwiki|/home/ubuntu/thrillwiki|g" /etc/systemd/system/thrillwiki-webhook.service
# Enable and start services
sudo systemctl daemon-reload
sudo systemctl enable thrillwiki
sudo systemctl enable thrillwiki-webhook
sudo systemctl start thrillwiki
sudo systemctl start thrillwiki-webhook
echo "ThrillWiki deployment completed successfully!"
DEPLOY_EOF
- chmod +x /target/home/ubuntu/deploy-thrillwiki.sh
- chroot /target chown ubuntu:ubuntu /home/ubuntu/deploy-thrillwiki.sh
# Create systemd service to run deployment after first boot
- |
cat > /target/etc/systemd/system/thrillwiki-deploy.service << 'SERVICE_EOF'
[Unit]
Description=Deploy ThrillWiki on first boot
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
User=ubuntu
ExecStart=/home/ubuntu/deploy-thrillwiki.sh
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
SERVICE_EOF
- chroot /target systemctl enable thrillwiki-deploy
user-data:
disable_root: true
ssh_pwauth: false
power_state:
mode: reboot
"""
meta_data = f"""instance-id: thrillwiki-vm-001
local-hostname: thrillwiki-vm
network:
version: 2
ethernets:
enp1s0:
dhcp4: true
"""
# Create temp directory for cloud-init files
cloud_init_dir = "/tmp/cloud-init"
os.makedirs(cloud_init_dir, exist_ok=True)
with open(f"{cloud_init_dir}/user-data", 'w') as f:
f.write(user_data)
with open(f"{cloud_init_dir}/meta-data", 'w') as f:
f.write(meta_data)
# Create ISO
iso_path = f"/tmp/{VM_NAME}-cloud-init.iso"
# Try different ISO creation tools
iso_created = False
# Try genisoimage first
try:
subprocess.run([
'genisoimage',
'-output', iso_path,
'-volid', 'cidata',
'-joliet',
'-rock',
cloud_init_dir
], check=True)
iso_created = True
except FileNotFoundError:
logger.warning("genisoimage not found, trying mkisofs...")
# Try mkisofs as fallback
if not iso_created:
try:
subprocess.run([
'mkisofs',
'-output', iso_path,
'-volid', 'cidata',
'-joliet',
'-rock',
cloud_init_dir
], check=True)
iso_created = True
except FileNotFoundError:
logger.warning(
"mkisofs not found, trying hdiutil (macOS)...")
# Try hdiutil for macOS
if not iso_created:
try:
subprocess.run([
'hdiutil', 'makehybrid',
'-iso', '-joliet',
'-o', iso_path,
cloud_init_dir
], check=True)
iso_created = True
except FileNotFoundError:
logger.error(
"No ISO creation tool found. Please install genisoimage, mkisofs, or use macOS hdiutil")
return False
if not iso_created:
logger.error("Failed to create ISO with any available tool")
return False
# Copy ISO to Unraid
subprocess.run(
f"scp {iso_path} {UNRAID_USER}@{UNRAID_HOST}:/mnt/user/isos/",
shell=True,
check=True
)
logger.info("Cloud-init ISO created successfully")
return True
except Exception as e:
logger.error(f"Failed to create cloud-init ISO: {e}")
return False
def vm_status(self) -> str:
"""Get VM status."""
try:
result = subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh domstate {VM_NAME}'",
shell=True,
capture_output=True,
text=True
)
if result.returncode == 0:
return result.stdout.strip()
else:
return "unknown"
except Exception as e:
logger.error(f"Error getting VM status: {e}")
return "error"
def delete_vm(self) -> bool:
"""Completely remove VM and all associated files."""
try:
logger.info(f"Deleting VM {VM_NAME} and all associated files...")
# Check if VM exists
if not self.check_vm_exists():
logger.info(f"VM {VM_NAME} does not exist")
return True
# Stop VM if running
if self.vm_status() == "running":
logger.info(f"Stopping VM {VM_NAME}...")
self.stop_vm()
import time
time.sleep(5)
# Undefine VM with NVRAM
logger.info(f"Undefining VM {VM_NAME}...")
subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh undefine {VM_NAME} --nvram'",
shell=True,
check=True
)
# Remove VM directory and all files
logger.info(f"Removing VM directory and files...")
subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rm -rf {self.vm_config_path}'",
shell=True,
check=True
)
# Remove cloud-init ISO
subprocess.run(
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rm -f /mnt/user/isos/{VM_NAME}-cloud-init.iso'",
shell=True,
check=False # Don't fail if file doesn't exist
)
logger.info(f"VM {VM_NAME} completely removed")
return True
except Exception as e:
logger.error(f"Failed to delete VM: {e}")
return False
def main():
"""Main function."""
import argparse
parser = argparse.ArgumentParser(
description='Unraid VM Manager for ThrillWiki')
parser.add_argument('action', choices=['create', 'start', 'stop', 'status', 'ip', 'setup', 'delete'],
help='Action to perform')
args = parser.parse_args()
# Create logs directory
os.makedirs('logs', exist_ok=True)
vm_manager = UnraidVMManager()
if args.action == 'create':
success = vm_manager.create_vm()
sys.exit(0 if success else 1)
elif args.action == 'start':
success = vm_manager.start_vm()
sys.exit(0 if success else 1)
elif args.action == 'stop':
success = vm_manager.stop_vm()
sys.exit(0 if success else 1)
elif args.action == 'status':
status = vm_manager.vm_status()
print(f"VM Status: {status}")
sys.exit(0)
elif args.action == 'ip':
ip = vm_manager.get_vm_ip()
if ip:
print(f"VM IP: {ip}")
sys.exit(0)
else:
print("Failed to get VM IP")
sys.exit(1)
elif args.action == 'setup':
logger.info("Setting up complete VM environment...")
# Create VM
if not vm_manager.create_vm():
sys.exit(1)
# Start VM
if not vm_manager.start_vm():
sys.exit(1)
# Get IP
vm_ip = vm_manager.get_vm_ip()
if not vm_ip:
sys.exit(1)
print(f"VM setup complete. IP: {vm_ip}")
print("You can now connect via SSH and complete the ThrillWiki setup.")
sys.exit(0)
elif args.action == 'delete':
success = vm_manager.delete_vm()
sys.exit(0 if success else 1)
if __name__ == '__main__':
main()