Mirror of https://github.com/pacnpal/thrillwiki_django_no_react.git (synced 2025-12-20 18:31:09 -05:00)
- Add complete backend/ directory with full Django application
- Add frontend/ directory with Vite + TypeScript setup ready for Next.js
- Add comprehensive shared/ directory with:
  - Complete documentation and memory-bank archives
  - Media files and avatars (letters, park/ride images)
  - Deployment scripts and automation tools
  - Shared types and utilities
- Add architecture/ directory with migration guides
- Configure pnpm workspace for monorepo development
- Update .gitignore to exclude .django_tailwind_cli/ build artifacts
- Preserve all historical documentation in shared/docs/memory-bank/
- Set up proper structure for full-stack development with shared resources
1308 lines
49 KiB
Python
Executable File
#!/usr/bin/env python3
"""
Unraid VM Manager for ThrillWiki - Modular Ubuntu Autoinstall

Follows the Ubuntu autoinstall guide exactly:
1. Creates modified Ubuntu ISO with autoinstall configuration
2. Manages VM lifecycle on Unraid server
3. Handles ThrillWiki deployment automation
"""

import os
import sys
import time
import logging
import subprocess
import shutil
from pathlib import Path
from typing import Optional

# Import our modular components
# Note: UnraidVMManager is defined locally in this file

# Configuration
UNRAID_HOST = os.environ.get("UNRAID_HOST", "localhost")
UNRAID_USER = os.environ.get("UNRAID_USER", "root")
VM_NAME = os.environ.get("VM_NAME", "thrillwiki-vm")
VM_MEMORY = int(os.environ.get("VM_MEMORY", 4096))  # MB
VM_VCPUS = int(os.environ.get("VM_VCPUS", 2))
VM_DISK_SIZE = int(os.environ.get("VM_DISK_SIZE", 50))  # GB
SSH_PUBLIC_KEY = os.environ.get("SSH_PUBLIC_KEY", "")

# Network Configuration
VM_IP = os.environ.get("VM_IP", "dhcp")
VM_GATEWAY = os.environ.get("VM_GATEWAY", "192.168.20.1")
VM_NETMASK = os.environ.get("VM_NETMASK", "255.255.255.0")
VM_NETWORK = os.environ.get("VM_NETWORK", "192.168.20.0/24")

# GitHub Configuration
REPO_URL = os.environ.get("REPO_URL", "")
GITHUB_USERNAME = os.environ.get("GITHUB_USERNAME", "")
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN", "")

# Ubuntu version preference
UBUNTU_VERSION = os.environ.get("UBUNTU_VERSION", "24.04")
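
# Example environment configuration for the variables above (illustrative only;
# the host name, IP, and key path are assumptions, adjust to your setup):
#
#   export UNRAID_HOST=tower.local
#   export UNRAID_USER=root
#   export VM_NAME=thrillwiki-vm
#   export VM_MEMORY=8192          # MB
#   export VM_VCPUS=4
#   export VM_IP=192.168.20.50     # or "dhcp"
#   export SSH_PUBLIC_KEY="$(cat ~/.ssh/id_ed25519.pub)"
#   export REPO_URL=https://github.com/pacnpal/thrillwiki_django_no_react.git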

# Setup logging
os.makedirs("logs", exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("logs/unraid-vm.log"),
        logging.StreamHandler(),
    ],
)
logger = logging.getLogger(__name__)
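
# With the format above, a typical log line looks roughly like this
# (illustrative example, not captured output):
#   2025-01-01 12:00:00,000 - INFO - VM thrillwiki-vm started successfully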


class UnraidVMManager:
    """Manages VMs on Unraid server."""

    def __init__(self):
        self.vm_config_path = f"/mnt/user/domains/{VM_NAME}"

    def authenticate(self) -> bool:
        """Test SSH connectivity to Unraid server."""
        try:
            result = subprocess.run(
                f"ssh -o ConnectTimeout=10 {UNRAID_USER}@{UNRAID_HOST} 'echo Connected'",
                shell=True, capture_output=True, text=True, timeout=15,
            )

            if result.returncode == 0 and "Connected" in result.stdout:
                logger.info("Successfully connected to Unraid via SSH")
                return True
            else:
                logger.error(f"SSH connection failed: {result.stderr}")
                return False

        except Exception as e:
            logger.error(f"SSH authentication error: {e}")
            return False

    def check_vm_exists(self) -> bool:
        """Check if VM already exists."""
        try:
            result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh list --all | grep {VM_NAME}'",
                shell=True, capture_output=True, text=True,
            )
            return VM_NAME in result.stdout
        except Exception as e:
            logger.error(f"Error checking VM existence: {e}")
            return False

    def _generate_mac_suffix(self) -> str:
        """Generate MAC address suffix based on VM IP or name."""
        if VM_IP.lower() != "dhcp" and "." in VM_IP:
            # Use last octet of static IP for MAC generation
            last_octet = int(VM_IP.split(".")[-1])
            return f"{last_octet:02x}:7d:fd"
        else:
            # Use hash of VM name for consistent MAC generation
            import hashlib

            hash_obj = hashlib.md5(VM_NAME.encode())
            hash_bytes = hash_obj.digest()[:3]
            return ":".join([f"{b:02x}" for b in hash_bytes])
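
    # Illustrative example of the suffix generation above (assumed values, not
    # taken from a real run): with VM_IP="192.168.20.50" the last octet 50
    # becomes 0x32, so the suffix is "32:7d:fd" and the template's
    # 52:54:00:{MAC_SUFFIX} expands to 52:54:00:32:7d:fd. With VM_IP="dhcp",
    # the first three bytes of md5("thrillwiki-vm") are used instead, which
    # keeps the MAC stable across re-creations of the same VM name.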

    def create_vm_xml(self, existing_uuid: Optional[str] = None) -> str:
        """Generate VM XML configuration from template file."""
        import uuid

        vm_uuid = existing_uuid if existing_uuid else str(uuid.uuid4())

        # Detect Ubuntu ISO dynamically
        ubuntu_iso_path = self._detect_ubuntu_iso()
        if not ubuntu_iso_path:
            raise FileNotFoundError("No Ubuntu ISO found for VM template")

        # Read XML template from file
        template_path = Path(__file__).parent / "thrillwiki-vm-template.xml"
        if not template_path.exists():
            raise FileNotFoundError(f"VM XML template not found at {template_path}")

        with open(template_path, "r", encoding="utf-8") as f:
            xml_template = f.read()

        # Calculate CPU topology
        cpu_cores = VM_VCPUS // 2 if VM_VCPUS > 1 else 1
        cpu_threads = 2 if VM_VCPUS > 1 else 1
        mac_suffix = self._generate_mac_suffix()

        # Replace placeholders with actual values
        xml_content = xml_template.format(
            VM_NAME=VM_NAME,
            VM_UUID=vm_uuid,
            VM_MEMORY_KIB=VM_MEMORY * 1024,
            VM_VCPUS=VM_VCPUS,
            CPU_CORES=cpu_cores,
            CPU_THREADS=cpu_threads,
            MAC_SUFFIX=mac_suffix,
            UBUNTU_ISO_PATH=ubuntu_iso_path,
        )

        return xml_content.strip()
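
    # Sketch of how the topology calculation above maps vCPUs onto the template
    # placeholders (assumed values for illustration): VM_VCPUS=4 gives
    # CPU_CORES=2 and CPU_THREADS=2 (one socket, 2 cores x 2 threads), while
    # VM_VCPUS=1 gives 1 core x 1 thread. VM_MEMORY is passed in KiB, so the
    # default 4096 MB becomes VM_MEMORY_KIB=4194304.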

    def _detect_ubuntu_iso(self) -> Optional[str]:
        """Detect and return the path of the best available Ubuntu ISO."""
        try:
            # Find all Ubuntu ISOs
            find_all_result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'find /mnt/user/isos -name \"ubuntu*.iso\" -type f | sort -V'",
                shell=True, capture_output=True, text=True,
            )

            if find_all_result.returncode != 0 or not find_all_result.stdout.strip():
                return None

            available_isos = find_all_result.stdout.strip().split("\n")

            # Prioritize ISOs by version and type
            # Sort by preference: 24.04 LTS > 22.04 LTS > 23.x > 20.04 > others
            # Within each version, prefer the latest point release
            priority_versions = [
                "24.04",  # Ubuntu 24.04 LTS (highest priority)
                "22.04",  # Ubuntu 22.04 LTS
                "23.10",  # Ubuntu 23.10
                "23.04",  # Ubuntu 23.04
                "20.04",  # Ubuntu 20.04 LTS
            ]

            # Find the best ISO based on priority, preferring latest point releases
            for version in priority_versions:
                # Find all ISOs for this version
                version_isos = []
                for iso in available_isos:
                    if version in iso and (
                        "server" in iso.lower() or "live" in iso.lower()
                    ):
                        version_isos.append(iso)

                if version_isos:
                    # Sort by version number (reverse to get latest first)
                    # This puts 24.04.3 before 24.04.2 before 24.04.1 before 24.04
                    version_isos.sort(reverse=True)
                    return version_isos[0]

            # If no priority match, use the first server/live ISO found
            for iso in available_isos:
                if "server" in iso.lower() or "live" in iso.lower():
                    return iso

            # If still no match, use the first Ubuntu ISO found (any type)
            if available_isos:
                return available_isos[0]

            return None

        except Exception as e:
            logger.error(f"Error detecting Ubuntu ISO: {e}")
            return None
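
    # Example of the selection order above (hypothetical ISO names): given
    #   /mnt/user/isos/ubuntu-22.04.4-live-server-amd64.iso
    #   /mnt/user/isos/ubuntu-24.04-live-server-amd64.iso
    #   /mnt/user/isos/ubuntu-24.04.1-live-server-amd64.iso
    # the 24.04 group matches first and the reverse sort picks the 24.04.1
    # point release; ISOs without "server" or "live" in the name are only
    # used as a last resort.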

    def create_vm(self) -> bool:
        """Create or update the VM on Unraid."""
        try:
            vm_exists = self.check_vm_exists()

            if vm_exists:
                logger.info(f"VM {VM_NAME} already exists, updating configuration...")
                # Always try to stop VM before updating (force stop)
                current_status = self.vm_status()
                logger.info(f"Current VM status: {current_status}")

                if current_status not in ["shut off", "unknown"]:
                    logger.info(f"Stopping VM {VM_NAME} for configuration update...")
                    self.stop_vm()
                    # Wait for VM to stop
                    time.sleep(3)
                else:
                    logger.info(f"VM {VM_NAME} is already stopped")
            else:
                logger.info(f"Creating VM {VM_NAME}...")

            # Ensure VM directory exists (for both new and updated VMs)
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'mkdir -p {self.vm_config_path}'",
                shell=True, check=True,
            )

            # Create virtual disk if it doesn't exist (for both new and updated VMs)
            disk_check = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'test -f {self.vm_config_path}/vdisk1.qcow2'",
                shell=True, capture_output=True,
            )

            if disk_check.returncode != 0:
                logger.info(f"Creating virtual disk for VM {VM_NAME}...")
                disk_cmd = f"""
                ssh {UNRAID_USER}@{UNRAID_HOST} 'qemu-img create -f qcow2 {self.vm_config_path}/vdisk1.qcow2 {VM_DISK_SIZE}G'
                """
                subprocess.run(disk_cmd, shell=True, check=True)
            else:
                logger.info(f"Virtual disk already exists for VM {VM_NAME}")

            # Always create/recreate the cloud-init ISO for automated installation
            # and ThrillWiki deployment. This ensures the latest configuration is
            # used whether creating or updating the VM.
            logger.info(
                "Creating cloud-init ISO for automated Ubuntu and ThrillWiki setup..."
            )
            if not self.create_cloud_init_iso(VM_IP):
                logger.error("Failed to create cloud-init ISO")
                return False

            # For Ubuntu 24.04, use UEFI boot instead of kernel extraction;
            # Ubuntu 24.04 has issues with direct kernel boot autoinstall.
            logger.info("Using UEFI boot for Ubuntu 24.04 compatibility...")
            if not self.fallback_to_uefi_boot():
                logger.error("UEFI boot setup failed")
                return False

            existing_uuid = None

            if vm_exists:
                # Get existing VM UUID
                result = subprocess.run(
                    f'ssh {UNRAID_USER}@{UNRAID_HOST} \'virsh dumpxml {VM_NAME} | grep "<uuid>" | sed "s/<uuid>//g" | sed "s/<\\/uuid>//g" | tr -d " "\'',
                    shell=True, capture_output=True, text=True,
                )

                if result.returncode == 0 and result.stdout.strip():
                    existing_uuid = result.stdout.strip()
                    logger.info(f"Found existing VM UUID: {existing_uuid}")

                # Check if VM is persistent or transient
                persistent_check = subprocess.run(
                    f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh list --persistent --all | grep {VM_NAME}'",
                    shell=True, capture_output=True, text=True,
                )

                is_persistent = VM_NAME in persistent_check.stdout

                if is_persistent:
                    # Undefine persistent VM with NVRAM flag
                    logger.info(
                        f"VM {VM_NAME} is persistent, undefining with NVRAM for reconfiguration..."
                    )
                    subprocess.run(
                        f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh undefine {VM_NAME} --nvram'",
                        shell=True, check=True,
                    )
                    logger.info(f"Persistent VM {VM_NAME} undefined for reconfiguration")
                else:
                    # Handle transient VM - just destroy it
                    logger.info(f"VM {VM_NAME} is transient, destroying for reconfiguration...")
                    # Stop the VM first if it's running
                    if self.vm_status() == "running":
                        subprocess.run(
                            f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh destroy {VM_NAME}'",
                            shell=True, check=True,
                        )
                    logger.info(f"Transient VM {VM_NAME} destroyed for reconfiguration")

            # Generate VM XML with appropriate UUID
            vm_xml = self.create_vm_xml(existing_uuid)
            xml_file = f"/tmp/{VM_NAME}.xml"

            with open(xml_file, "w", encoding="utf-8") as f:
                f.write(vm_xml)

            # Copy XML to Unraid and define/redefine VM
            subprocess.run(
                f"scp {xml_file} {UNRAID_USER}@{UNRAID_HOST}:/tmp/",
                shell=True, check=True,
            )

            # Define VM as persistent domain
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh define /tmp/{VM_NAME}.xml'",
                shell=True, check=True,
            )

            # Ensure VM is set to autostart for persistent configuration
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh autostart {VM_NAME}'",
                shell=True,
                check=False,  # Don't fail if autostart is already enabled
            )

            action = "updated" if vm_exists else "created"
            logger.info(f"VM {VM_NAME} {action} successfully")

            # Cleanup
            os.remove(xml_file)

            return True

        except Exception as e:
            logger.error(f"Failed to create VM: {e}")
            return False

    def extract_ubuntu_kernel(self) -> bool:
        """Extract Ubuntu kernel and initrd from ISO for direct boot."""
        iso_mount_point = "/tmp/ubuntu-iso"
        try:
            # Check available Ubuntu ISOs and select the correct one
            logger.info("Checking for available Ubuntu ISOs...")
            # List available Ubuntu ISOs with detailed information
            result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'ls -la /mnt/user/isos/ubuntu*.iso 2>/dev/null || echo \"No Ubuntu ISOs found\"'",
                shell=True, capture_output=True, text=True,
            )

            logger.info(f"Available ISOs: {result.stdout}")

            # First, try to find ANY existing Ubuntu ISOs dynamically.
            # This finds all Ubuntu ISOs regardless of naming convention.
            find_all_result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'find /mnt/user/isos -name \"ubuntu*.iso\" -type f | sort -V'",
                shell=True, capture_output=True, text=True,
            )

            ubuntu_iso_path = None
            available_isos = []

            if find_all_result.returncode == 0 and find_all_result.stdout.strip():
                available_isos = find_all_result.stdout.strip().split("\n")
                logger.info(f"Found {len(available_isos)} Ubuntu ISOs: {available_isos}")

                # Prioritize ISOs by version and type (prefer LTS, prefer newer versions)
                # Sort by preference: 24.04 LTS > 22.04 LTS > 23.x > 20.04 > others
                # Within each version, prefer the latest point release
                priority_versions = [
                    "24.04",  # Ubuntu 24.04 LTS (highest priority)
                    "22.04",  # Ubuntu 22.04 LTS
                    "23.10",  # Ubuntu 23.10
                    "23.04",  # Ubuntu 23.04
                    "20.04",  # Ubuntu 20.04 LTS
                ]

                # Find the best ISO based on priority, preferring latest point releases
                for version in priority_versions:
                    # Find all ISOs for this version
                    version_isos = []
                    for iso in available_isos:
                        if version in iso and (
                            "server" in iso.lower() or "live" in iso.lower()
                        ):
                            version_isos.append(iso)

                    if version_isos:
                        # Sort by version number (reverse to get latest first)
                        # This puts 24.04.3 before 24.04.2 before 24.04.1 before 24.04
                        version_isos.sort(reverse=True)
                        ubuntu_iso_path = version_isos[0]
                        logger.info(f"Selected latest Ubuntu {version} ISO: {ubuntu_iso_path}")
                        break

                # If no priority match, use the first server/live ISO found
                if not ubuntu_iso_path:
                    for iso in available_isos:
                        if "server" in iso.lower() or "live" in iso.lower():
                            ubuntu_iso_path = iso
                            logger.info(f"Selected Ubuntu server/live ISO: {ubuntu_iso_path}")
                            break

                # If still no match, use the first Ubuntu ISO found (any type)
                if not ubuntu_iso_path and available_isos:
                    ubuntu_iso_path = available_isos[0]
                    logger.info(f"Selected first available Ubuntu ISO: {ubuntu_iso_path}")
                    logger.warning(
                        "Using non-server Ubuntu ISO - this may not support autoinstall"
                    )

            if not ubuntu_iso_path:
                logger.error("No Ubuntu server ISO found in /mnt/user/isos/")
                logger.error("")
                logger.error("🔥 MISSING UBUNTU ISO - ACTION REQUIRED 🔥")
                logger.error("")
                logger.error("Please download Ubuntu LTS Server ISO to your Unraid server:")
                logger.error("")
                logger.error("📦 RECOMMENDED: Ubuntu 24.04 LTS (Noble Numbat) - Latest LTS:")
                logger.error("   1. Go to: https://releases.ubuntu.com/24.04/")
                logger.error("   2. Download: ubuntu-24.04-live-server-amd64.iso")
                logger.error("   3. Upload to: /mnt/user/isos/ on your Unraid server")
                logger.error("")
                logger.error("📦 ALTERNATIVE: Ubuntu 22.04 LTS (Jammy Jellyfish) - Stable:")
                logger.error("   1. Go to: https://releases.ubuntu.com/22.04/")
                logger.error("   2. Download: ubuntu-22.04-live-server-amd64.iso")
                logger.error("   3. Upload to: /mnt/user/isos/ on your Unraid server")
                logger.error("")
                logger.error("💡 Quick download via wget on Unraid server:")
                logger.error("   # For Ubuntu 24.04 LTS (recommended):")
                logger.error(
                    "   wget -P /mnt/user/isos/ https://releases.ubuntu.com/24.04/ubuntu-24.04-live-server-amd64.iso"
                )
                logger.error("   # For Ubuntu 22.04 LTS (stable):")
                logger.error(
                    "   wget -P /mnt/user/isos/ https://releases.ubuntu.com/22.04/ubuntu-22.04-live-server-amd64.iso"
                )
                logger.error("")
                logger.error("Then re-run this script.")
                logger.error("")
                return False

            # Verify ISO file integrity
            logger.info(f"Verifying ISO file: {ubuntu_iso_path}")
            stat_result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'stat {ubuntu_iso_path}'",
                shell=True, capture_output=True, text=True,
            )
            if stat_result.returncode != 0:
                logger.error(f"Cannot access ISO file: {ubuntu_iso_path}")
                return False

            logger.info(f"ISO file stats: {stat_result.stdout.strip()}")

            # Clean up any previous mount points
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'umount {iso_mount_point} 2>/dev/null || true'",
                shell=True, check=False,
            )

            # Remove mount point if it exists
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rmdir {iso_mount_point} 2>/dev/null || true'",
                shell=True, check=False,
            )

            # Create mount point
            logger.info(f"Creating mount point: {iso_mount_point}")
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'mkdir -p {iso_mount_point}'",
                shell=True, check=True,
            )

            # Check if loop module is loaded
            logger.info("Checking loop module availability...")
            loop_check = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'lsmod | grep loop || modprobe loop'",
                shell=True, capture_output=True, text=True,
            )
            logger.info(f"Loop module check: {loop_check.stdout}")

            # Mount ISO with more verbose output
            logger.info(f"Mounting ISO: {ubuntu_iso_path} to {iso_mount_point}")
            mount_result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'mount -o loop,ro {ubuntu_iso_path} {iso_mount_point}'",
                shell=True, capture_output=True, text=True,
            )

            if mount_result.returncode != 0:
                logger.error(f"Failed to mount ISO. Return code: {mount_result.returncode}")
                logger.error(f"STDOUT: {mount_result.stdout}")
                logger.error(f"STDERR: {mount_result.stderr}")
                return False

            logger.info("ISO mounted successfully")

            # Create directory for extracted kernel files
            kernel_dir = f"/mnt/user/domains/{VM_NAME}/kernel"
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'mkdir -p {kernel_dir}'",
                shell=True, check=True,
            )

            # Extract kernel and initrd
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'cp {iso_mount_point}/casper/vmlinuz {kernel_dir}/'",
                shell=True, check=True,
            )

            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'cp {iso_mount_point}/casper/initrd {kernel_dir}/'",
                shell=True, check=True,
            )

            # Unmount ISO
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'umount {iso_mount_point}'",
                shell=True, check=True,
            )

            # Remove mount point
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rmdir {iso_mount_point}'",
                shell=True, check=True,
            )

            logger.info("Ubuntu kernel and initrd extracted successfully")
            return True

        except Exception as e:
            logger.error(f"Failed to extract Ubuntu kernel: {e}")
            # Clean up on failure
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'umount {iso_mount_point} 2>/dev/null || true'",
                shell=True, check=False,
            )
            return False

    def fallback_to_uefi_boot(self) -> bool:
        """Fallback to UEFI boot when kernel extraction fails."""
        try:
            logger.info("Setting up fallback UEFI boot configuration...")

            # First, detect available Ubuntu ISO for the fallback template
            ubuntu_iso_path = self._detect_ubuntu_iso()
            if not ubuntu_iso_path:
                logger.error("Cannot create UEFI fallback without Ubuntu ISO")
                return False

            # Create a fallback VM XML template path
            fallback_template_path = (
                Path(__file__).parent / "thrillwiki-vm-uefi-fallback-template.xml"
            )

            # Create fallback UEFI template with detected Ubuntu ISO
            logger.info(
                f"Creating fallback UEFI template with detected ISO: {ubuntu_iso_path}"
            )
            uefi_template = f"""<?xml version='1.0' encoding='UTF-8'?>
<domain type='kvm'>
  <name>{{VM_NAME}}</name>
  <uuid>{{VM_UUID}}</uuid>
  <metadata>
    <vmtemplate xmlns="unraid" name="ThrillWiki VM" iconold="ubuntu.png" icon="ubuntu.png" os="linux" webui=""/>
  </metadata>
  <memory unit='KiB'>{{VM_MEMORY_KIB}}</memory>
  <currentMemory unit='KiB'>{{VM_MEMORY_KIB}}</currentMemory>
  <vcpu placement='static'>{{VM_VCPUS}}</vcpu>
  <os>
    <type arch='x86_64' machine='pc-q35-9.2'>hvm</type>
    <loader readonly='yes' type='pflash'>/usr/share/qemu/ovmf-x64/OVMF_CODE-pure-efi.fd</loader>
    <nvram>/etc/libvirt/qemu/nvram/{{VM_UUID}}_VARS-pure-efi.fd</nvram>
    <boot dev='cdrom'/>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <vmport state='off'/>
  </features>
  <cpu mode='host-passthrough' check='none' migratable='on'>
    <topology sockets='1' dies='1' clusters='1' cores='{{CPU_CORES}}' threads='{{CPU_THREADS}}'/>
    <cache mode='passthrough'/>
    <feature policy='require' name='topoext'/>
  </cpu>
  <clock offset='utc'>
    <timer name='hpet' present='no'/>
    <timer name='hypervclock' present='yes'/>
    <timer name='pit' tickpolicy='delay'/>
    <timer name='rtc' tickpolicy='catchup'/>
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <pm>
    <suspend-to-mem enabled='no'/>
    <suspend-to-disk enabled='no'/>
  </pm>
  <devices>
    <emulator>/usr/local/sbin/qemu</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback' discard='ignore'/>
      <source file='/mnt/user/domains/{VM_NAME}/vdisk1.qcow2'/>
      <target dev='hdc' bus='virtio'/>
      <boot order='2'/>
      <address type='pci' domain='0x0000' bus='0x02' slot='0x00' function='0x0'/>
    </disk>
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='{ubuntu_iso_path}'/>
      <target dev='hda' bus='sata'/>
      <readonly/>
      <boot order='1'/>
      <address type='drive' controller='0' bus='0' target='0' unit='0'/>
    </disk>
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='/mnt/user/isos/{VM_NAME}-cloud-init.iso'/>
      <target dev='hdb' bus='sata'/>
      <readonly/>
      <address type='drive' controller='0' bus='0' target='0' unit='1'/>
    </disk>
    <controller type='usb' index='0' model='qemu-xhci' ports='15'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
    </controller>
    <controller type='pci' index='0' model='pcie-root'/>
    <controller type='pci' index='1' model='pcie-root-port'>
      <model name='pcie-root-port'/>
      <target chassis='1' port='0x10'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0' multifunction='on'/>
    </controller>
    <controller type='pci' index='2' model='pcie-root-port'>
      <model name='pcie-root-port'/>
      <target chassis='2' port='0x11'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x1'/>
    </controller>
    <controller type='pci' index='3' model='pcie-root-port'>
      <model name='pcie-root-port'/>
      <target chassis='3' port='0x12'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x2'/>
    </controller>
    <controller type='pci' index='4' model='pcie-root-port'>
      <model name='pcie-root-port'/>
      <target chassis='4' port='0x13'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x3'/>
    </controller>
    <controller type='pci' index='5' model='pcie-root-port'>
      <model name='pcie-root-port'/>
      <target chassis='5' port='0x14'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x4'/>
    </controller>
    <controller type='virtio-serial' index='0'>
      <address type='pci' domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
    </controller>
    <controller type='sata' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/>
    </controller>
    <interface type='bridge'>
      <mac address='52:54:00:{{MAC_SUFFIX}}'/>
      <source bridge='br0.20'/>
      <model type='virtio'/>
      <address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
    </interface>
    <serial type='pty'>
      <target type='isa-serial' port='0'>
        <model name='isa-serial'/>
      </target>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <channel type='unix'>
      <target type='virtio' name='org.qemu.guest_agent.0'/>
      <address type='virtio-serial' controller='0' bus='0' port='1'/>
    </channel>
    <input type='tablet' bus='usb'>
      <address type='usb' bus='0' port='1'/>
    </input>
    <input type='mouse' bus='ps2'/>
    <input type='keyboard' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes' websocket='-1' listen='0.0.0.0' sharePolicy='ignore'>
      <listen type='address' address='0.0.0.0'/>
    </graphics>
    <audio id='1' type='none'/>
    <video>
      <model type='qxl' ram='65536' vram='65536' vram64='65535' vgamem='65536' heads='1' primary='yes'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x1e' function='0x0'/>
    </video>
    <watchdog model='itco' action='reset'/>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x0'/>
    </memballoon>
  </devices>
</domain>"""

            with open(fallback_template_path, "w", encoding="utf-8") as f:
                f.write(uefi_template)

            logger.info(f"Created fallback UEFI template: {fallback_template_path}")

            # Update the template path to use the fallback
            original_template = Path(__file__).parent / "thrillwiki-vm-template.xml"
            fallback_template = (
                Path(__file__).parent / "thrillwiki-vm-uefi-fallback-template.xml"
            )

            # Backup original template and replace with fallback
            if original_template.exists():
                backup_path = (
                    Path(__file__).parent / "thrillwiki-vm-template.xml.backup"
                )
                original_template.rename(backup_path)
                logger.info(f"Backed up original template to {backup_path}")

            fallback_template.rename(original_template)
            logger.info("Switched to UEFI fallback template")

            return True

        except Exception as e:
            logger.error(f"Failed to set up UEFI fallback: {e}")
            return False
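
    # Note on the swap above: after a UEFI fallback run, the original direct-boot
    # template is preserved as thrillwiki-vm-template.xml.backup next to this
    # script. A sketch of how it could be restored manually (assumed workflow,
    # not performed by this tool):
    #   mv thrillwiki-vm-template.xml.backup thrillwiki-vm-template.xml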

    def create_nvram_file(self, vm_uuid: str) -> bool:
        """Create NVRAM file for UEFI VM."""
        try:
            nvram_path = f"/etc/libvirt/qemu/nvram/{vm_uuid}_VARS-pure-efi.fd"

            # Check if NVRAM file already exists
            result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'test -f {nvram_path}'",
                shell=True, capture_output=True,
            )

            if result.returncode == 0:
                logger.info(f"NVRAM file already exists: {nvram_path}")
                return True

            # Copy template to create NVRAM file
            logger.info(f"Creating NVRAM file: {nvram_path}")
            result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'cp /usr/share/qemu/ovmf-x64/OVMF_VARS-pure-efi.fd {nvram_path}'",
                shell=True, capture_output=True, text=True,
            )

            if result.returncode == 0:
                logger.info("NVRAM file created successfully")
                return True
            else:
                logger.error(f"Failed to create NVRAM file: {result.stderr}")
                return False

        except Exception as e:
            logger.error(f"Error creating NVRAM file: {e}")
            return False

    def start_vm(self) -> bool:
        """Start the VM if it's not already running."""
        try:
            # Check if VM is already running
            current_status = self.vm_status()
            if current_status == "running":
                logger.info(f"VM {VM_NAME} is already running")
                return True

            logger.info(f"Starting VM {VM_NAME}...")

            # For new VMs, we need to extract the UUID and create the NVRAM file
            vm_exists = self.check_vm_exists()
            if not vm_exists:
                logger.error("Cannot start VM that doesn't exist")
                return False

            # Get VM UUID from XML
            result = subprocess.run(
                f'ssh {UNRAID_USER}@{UNRAID_HOST} \'virsh dumpxml {VM_NAME} | grep "<uuid>" | sed "s/<uuid>//g" | sed "s/<\\/uuid>//g" | tr -d " "\'',
                shell=True, capture_output=True, text=True,
            )

            if result.returncode == 0 and result.stdout.strip():
                vm_uuid = result.stdout.strip()
                logger.info(f"VM UUID: {vm_uuid}")

                # Create NVRAM file if it doesn't exist
                if not self.create_nvram_file(vm_uuid):
                    return False

            result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh start {VM_NAME}'",
                shell=True, capture_output=True, text=True,
            )

            if result.returncode == 0:
                logger.info(f"VM {VM_NAME} started successfully")
                return True
            else:
                logger.error(f"Failed to start VM: {result.stderr}")
                return False

        except Exception as e:
            logger.error(f"Error starting VM: {e}")
            return False

    def stop_vm(self) -> bool:
        """Stop the VM with timeout and force destroy if needed."""
        try:
            logger.info(f"Stopping VM {VM_NAME}...")

            # Try graceful shutdown first
            result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh shutdown {VM_NAME}'",
                shell=True, capture_output=True, text=True,
                timeout=10,  # 10 second timeout for the command itself
            )

            if result.returncode == 0:
                # Wait up to 30 seconds for graceful shutdown
                logger.info(f"Waiting for VM {VM_NAME} to shutdown gracefully...")
                for i in range(30):
                    status = self.vm_status()
                    if status in ["shut off", "unknown"]:
                        logger.info(f"VM {VM_NAME} stopped gracefully")
                        return True
                    time.sleep(1)

                # If still running after 30 seconds, force destroy
                logger.warning(
                    f"VM {VM_NAME} didn't shutdown gracefully, forcing destroy..."
                )
                destroy_result = subprocess.run(
                    f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh destroy {VM_NAME}'",
                    shell=True, capture_output=True, text=True, timeout=10,
                )

                if destroy_result.returncode == 0:
                    logger.info(f"VM {VM_NAME} forcefully destroyed")
                    return True
                else:
                    logger.error(f"Failed to destroy VM: {destroy_result.stderr}")
                    return False
            else:
                logger.error(f"Failed to initiate VM shutdown: {result.stderr}")
                return False

        except subprocess.TimeoutExpired:
            logger.error(f"Timeout stopping VM {VM_NAME}")
            return False
        except Exception as e:
            logger.error(f"Error stopping VM: {e}")
            return False

    def get_vm_ip(self) -> Optional[str]:
        """Get VM IP address."""
        try:
            # Wait for VM to get IP - Ubuntu autoinstall can take 20-30 minutes
            max_attempts = 120  # 20 minutes total wait time
            for attempt in range(max_attempts):
                result = subprocess.run(
                    f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh domifaddr {VM_NAME}'",
                    shell=True, capture_output=True, text=True,
                )

                if result.returncode == 0 and "ipv4" in result.stdout:
                    lines = result.stdout.strip().split("\n")
                    for line in lines:
                        if "ipv4" in line:
                            # Extract IP from a line like:
                            #   vnet0  52:54:00:xx:xx:xx  ipv4  192.168.1.100/24
                            parts = line.split()
                            if len(parts) >= 4:
                                ip_with_mask = parts[3]
                                ip = ip_with_mask.split("/")[0]
                                logger.info(f"VM IP address: {ip}")
                                return ip

                logger.info(
                    f"Waiting for VM IP... (attempt {attempt + 1}/{max_attempts}) - Ubuntu autoinstall in progress"
                )
                time.sleep(10)

            logger.error("Failed to get VM IP address")
            return None

        except Exception as e:
            logger.error(f"Error getting VM IP: {e}")
            return None
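
    # For reference, the `virsh domifaddr` output that the loop above parses
    # typically looks like this (illustrative; the MAC and IP will differ):
    #
    #  Name       MAC address          Protocol     Address
    # -------------------------------------------------------------
    #  vnet0      52:54:00:32:7d:fd    ipv4         192.168.20.50/24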

    def create_cloud_init_iso(self, vm_ip: str) -> bool:
        """Create cloud-init ISO for automated Ubuntu installation with autoinstall support."""
        try:
            logger.info("Creating cloud-init ISO with Ubuntu autoinstall support...")

            # Get environment variables
            repo_url = os.getenv("REPO_URL", "")
            ssh_public_key = os.getenv("SSH_PUBLIC_KEY", "")

            # Read autoinstall user-data template
            autoinstall_template_path = (
                Path(__file__).parent / "autoinstall-user-data.yaml"
            )
            if not autoinstall_template_path.exists():
                logger.error(
                    f"Autoinstall template not found at {autoinstall_template_path}"
                )
                return False

            with open(autoinstall_template_path, "r", encoding="utf-8") as f:
                autoinstall_template = f.read()

            # Replace placeholders in autoinstall template
            user_data = autoinstall_template.format(
                SSH_PUBLIC_KEY=(
                    ssh_public_key if ssh_public_key else "# No SSH key provided"
                ),
                GITHUB_REPO=repo_url if repo_url else "",
            )

            # Update network configuration in autoinstall based on the VM_IP setting
            if vm_ip.lower() == "dhcp":
                # Template already defaults to DHCP ("dhcp4: true"); nothing to change
                pass
            else:
                # Update with static IP configuration
                gateway = os.getenv("VM_GATEWAY", "192.168.20.1")
                # NOTE: the continuation lines below must keep the same indentation
                # as the "dhcp4: true" line inside autoinstall-user-data.yaml
                network_config = f"""dhcp4: false
        addresses:
          - {vm_ip}/24
        gateway4: {gateway}
        nameservers:
          addresses:
            - 8.8.8.8
            - 8.8.4.4"""
                user_data = user_data.replace("dhcp4: true", network_config)

            # Force clean temp directory for cloud-init files
            cloud_init_dir = "/tmp/cloud-init"
            if os.path.exists(cloud_init_dir):
                shutil.rmtree(cloud_init_dir)
            os.makedirs(cloud_init_dir, exist_ok=True)

            # Create server/ directory for autoinstall as per Ubuntu guide
            server_dir = f"{cloud_init_dir}/server"
            os.makedirs(server_dir, exist_ok=True)

            # Create user-data file in server/ directory with autoinstall configuration
            with open(f"{server_dir}/user-data", "w", encoding="utf-8") as f:
                f.write(user_data)

            # Create empty meta-data file in server/ directory as per Ubuntu guide
            with open(f"{server_dir}/meta-data", "w", encoding="utf-8") as f:
                f.write("")

            # Create root level meta-data for cloud-init
            meta_data = f"""instance-id: thrillwiki-vm-{int(time.time())}
local-hostname: thrillwiki-vm
"""
            with open(f"{cloud_init_dir}/meta-data", "w", encoding="utf-8") as f:
                f.write(meta_data)

            # Create user-data at root level (minimal cloud-config)
            root_user_data = """#cloud-config
# Root level cloud-config for compatibility
# Main autoinstall config is in /server/user-data
"""
            with open(f"{cloud_init_dir}/user-data", "w", encoding="utf-8") as f:
                f.write(root_user_data)

            # Force remove old ISO first
            iso_path = f"/tmp/{VM_NAME}-cloud-init.iso"
            if os.path.exists(iso_path):
                os.remove(iso_path)
                logger.info(f"Removed old cloud-init ISO: {iso_path}")

            # Try different ISO creation tools
            iso_created = False

            # Try genisoimage first
            try:
                subprocess.run(
                    [
                        "genisoimage",
                        "-output", iso_path,
                        "-volid", "cidata",
                        "-joliet",
                        "-rock",
                        cloud_init_dir,
                    ],
                    check=True,
                )
                iso_created = True
            except FileNotFoundError:
                logger.warning("genisoimage not found, trying mkisofs...")

            # Try mkisofs as fallback
            if not iso_created:
                try:
                    subprocess.run(
                        [
                            "mkisofs",
                            "-output", iso_path,
                            "-volid", "cidata",
                            "-joliet",
                            "-rock",
                            cloud_init_dir,
                        ],
                        check=True,
                    )
                    iso_created = True
                except FileNotFoundError:
                    logger.warning("mkisofs not found, trying hdiutil (macOS)...")

            # Try hdiutil for macOS
            if not iso_created:
                try:
                    subprocess.run(
                        [
                            "hdiutil",
                            "makehybrid",
                            "-iso",
                            "-joliet",
                            "-o", iso_path,
                            cloud_init_dir,
                        ],
                        check=True,
                    )
                    iso_created = True
                except FileNotFoundError:
                    logger.error(
                        "No ISO creation tool found. Please install genisoimage, mkisofs, or use macOS hdiutil"
                    )
                    return False

            if not iso_created:
                logger.error("Failed to create ISO with any available tool")
                return False

            # Force remove old ISO from Unraid first, then copy new one
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rm -f /mnt/user/isos/{VM_NAME}-cloud-init.iso'",
                shell=True,
                check=False,  # Don't fail if file doesn't exist
            )
            logger.info(
                f"Removed old cloud-init ISO from Unraid: /mnt/user/isos/{VM_NAME}-cloud-init.iso"
            )

            # Copy new ISO to Unraid
            subprocess.run(
                f"scp {iso_path} {UNRAID_USER}@{UNRAID_HOST}:/mnt/user/isos/",
                shell=True, check=True,
            )
            logger.info(
                f"Copied new cloud-init ISO to Unraid: /mnt/user/isos/{VM_NAME}-cloud-init.iso"
            )

            logger.info("Cloud-init ISO created successfully")
            return True

        except Exception as e:
            logger.error(f"Failed to create cloud-init ISO: {e}")
            return False
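
    # The resulting "cidata" ISO built above has this layout (paths are what
    # the method writes; the actual autoinstall contents come from
    # autoinstall-user-data.yaml):
    #
    #   /user-data          minimal #cloud-config stub
    #   /meta-data          instance-id + local-hostname
    #   /server/user-data   full autoinstall configuration
    #   /server/meta-data   empty file, required by the Ubuntu guide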

    def vm_status(self) -> str:
        """Get VM status."""
        try:
            result = subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh domstate {VM_NAME}'",
                shell=True, capture_output=True, text=True,
            )

            if result.returncode == 0:
                return result.stdout.strip()
            else:
                return "unknown"

        except Exception as e:
            logger.error(f"Error getting VM status: {e}")
            return "error"

    def delete_vm(self) -> bool:
        """Completely remove VM and all associated files."""
        try:
            logger.info(f"Deleting VM {VM_NAME} and all associated files...")

            # Check if VM exists
            if not self.check_vm_exists():
                logger.info(f"VM {VM_NAME} does not exist")
                return True

            # Stop VM if running
            if self.vm_status() == "running":
                logger.info(f"Stopping VM {VM_NAME}...")
                self.stop_vm()
                time.sleep(5)

            # Undefine VM with NVRAM
            logger.info(f"Undefining VM {VM_NAME}...")
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh undefine {VM_NAME} --nvram'",
                shell=True, check=True,
            )

            # Remove VM directory and all files
            logger.info("Removing VM directory and files...")
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rm -rf {self.vm_config_path}'",
                shell=True, check=True,
            )

            # Remove cloud-init ISO
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rm -f /mnt/user/isos/{VM_NAME}-cloud-init.iso'",
                shell=True,
                check=False,  # Don't fail if file doesn't exist
            )

            # Remove extracted kernel files
            subprocess.run(
                f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rm -rf /mnt/user/domains/{VM_NAME}/kernel'",
                shell=True,
                check=False,  # Don't fail if directory doesn't exist
            )

            logger.info(f"VM {VM_NAME} completely removed")
            return True

        except Exception as e:
            logger.error(f"Failed to delete VM: {e}")
            return False


def main():
    """Main function."""
    import argparse

    parser = argparse.ArgumentParser(description="Unraid VM Manager for ThrillWiki")
    parser.add_argument(
        "action",
        choices=["create", "start", "stop", "status", "ip", "setup", "delete"],
        help="Action to perform",
    )

    args = parser.parse_args()

    # Create logs directory
    os.makedirs("logs", exist_ok=True)

    vm_manager = UnraidVMManager()

    if args.action == "create":
        success = vm_manager.create_vm()
        sys.exit(0 if success else 1)

    elif args.action == "start":
        success = vm_manager.start_vm()
        sys.exit(0 if success else 1)

    elif args.action == "stop":
        success = vm_manager.stop_vm()
        sys.exit(0 if success else 1)

    elif args.action == "status":
        status = vm_manager.vm_status()
        print(f"VM Status: {status}")
        sys.exit(0)

    elif args.action == "ip":
        ip = vm_manager.get_vm_ip()
        if ip:
            print(f"VM IP: {ip}")
            sys.exit(0)
        else:
            print("Failed to get VM IP")
            sys.exit(1)

    elif args.action == "setup":
        logger.info("Setting up complete VM environment...")

        # Create VM
        if not vm_manager.create_vm():
            sys.exit(1)

        # Start VM
        if not vm_manager.start_vm():
            sys.exit(1)

        # Get IP
        vm_ip = vm_manager.get_vm_ip()
        if not vm_ip:
            sys.exit(1)

        print(f"VM setup complete. IP: {vm_ip}")
        print("You can now connect via SSH and complete the ThrillWiki setup.")

        sys.exit(0)

    elif args.action == "delete":
        success = vm_manager.delete_vm()
        sys.exit(0 if success else 1)
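
# Typical invocations (the script filename is whatever this file is saved as,
# shown here as unraid-vm-manager.py for illustration):
#
#   python3 unraid-vm-manager.py setup    # create + start the VM, then wait for its IP
#   python3 unraid-vm-manager.py status   # print the current virsh domain state
#   python3 unraid-vm-manager.py delete   # stop, undefine, and remove all VM files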


if __name__ == "__main__":
    main()