Mirror of https://github.com/pacnpal/thrillwiki_django_no_react.git (synced 2025-12-21 02:31:08 -05:00)
Refactor test utilities and enhance ASGI settings
- Cleaned up and standardized assertions in ApiTestMixin for API response validation.
- Updated ASGI settings to use os.environ for setting the DJANGO_SETTINGS_MODULE.
- Removed unused imports and improved formatting in settings.py.
- Refactored URL patterns in urls.py for better readability and organization.
- Enhanced view functions in views.py for consistency and clarity.
- Added .flake8 configuration for linting and style enforcement.
- Introduced type stubs for django-environ to improve type checking with Pylance.
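The ASGI change described above follows the usual Django pattern of registering the settings module through os.environ before the application object is built. A minimal sketch, assuming a hypothetical settings module path of "thrillwiki.settings" (not confirmed by this commit):

    # asgi.py (sketch) - "thrillwiki.settings" is an assumed module path
    import os

    from django.core.asgi import get_asgi_application

    # Register the settings module via os.environ before Django initializes the app.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thrillwiki.settings")

    application = get_asgi_application()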
@@ -10,7 +10,6 @@ Follows the Ubuntu autoinstall guide exactly:
"""

import os
import sys
import logging
import subprocess
import tempfile
@@ -26,7 +25,7 @@ UBUNTU_MIRRORS = [
"https://releases.ubuntu.com", # Official Ubuntu releases (primary)
"http://archive.ubuntu.com/ubuntu-releases", # Official archive
"http://mirror.csclub.uwaterloo.ca/ubuntu-releases", # University of Waterloo
"http://mirror.math.princeton.edu/pub/ubuntu-releases" # Princeton mirror
"http://mirror.math.princeton.edu/pub/ubuntu-releases", # Princeton mirror
]
UBUNTU_24_04_ISO = "24.04/ubuntu-24.04.3-live-server-amd64.iso"
UBUNTU_22_04_ISO = "22.04/ubuntu-22.04.3-live-server-amd64.iso"
@@ -36,28 +35,30 @@ def get_latest_ubuntu_server_iso(version: str) -> Optional[str]:
"""Dynamically find the latest point release for a given Ubuntu version."""
try:
import re

for mirror in UBUNTU_MIRRORS:
try:
url = f"{mirror}/{version}/"
response = urllib.request.urlopen(url, timeout=10)
content = response.read().decode('utf-8')

content = response.read().decode("utf-8")

# Find all server ISO files for this version
pattern = rf'ubuntu-{re.escape(version)}\.[0-9]+-live-server-amd64\.iso'
pattern = rf"ubuntu-{
re.escape(version)}\.[0-9]+-live-server-amd64\.iso"
matches = re.findall(pattern, content)

if matches:
# Sort by version and return the latest
matches.sort(key=lambda x: [int(n) for n in re.findall(r'\d+', x)])
matches.sort(key=lambda x: [int(n) for n in re.findall(r"\d+", x)])
latest_iso = matches[-1]
return f"{version}/{latest_iso}"
except Exception as e:
logger.debug(f"Failed to check {mirror}/{version}/: {e}")
continue

logger.warning(f"Could not dynamically detect latest ISO for Ubuntu {version}")
return None

except Exception as e:
logger.error(f"Error in dynamic ISO detection: {e}")
return None
@@ -65,61 +66,74 @@ def get_latest_ubuntu_server_iso(version: str) -> Optional[str]:
|
||||
|
||||
class UbuntuISOBuilder:
|
||||
"""Builds modified Ubuntu ISO with autoinstall configuration."""
|
||||
|
||||
|
||||
def __init__(self, vm_name: str, work_dir: Optional[str] = None):
|
||||
self.vm_name = vm_name
|
||||
self.work_dir = Path(work_dir) if work_dir else Path(tempfile.mkdtemp(prefix="ubuntu-autoinstall-"))
|
||||
self.work_dir = (
|
||||
Path(work_dir)
|
||||
if work_dir
|
||||
else Path(tempfile.mkdtemp(prefix="ubuntu-autoinstall-"))
|
||||
)
|
||||
self.source_files_dir = self.work_dir / "source-files"
|
||||
self.boot_dir = self.work_dir / "BOOT"
|
||||
self.server_dir = self.source_files_dir / "server"
|
||||
self.grub_cfg_path = self.source_files_dir / "boot" / "grub" / "grub.cfg"
|
||||
|
||||
|
||||
# Ensure directories exist
|
||||
self.work_dir.mkdir(exist_ok=True, parents=True)
|
||||
self.source_files_dir.mkdir(exist_ok=True, parents=True)
|
||||
|
||||
|
||||
def check_tools(self) -> bool:
|
||||
"""Check if required tools are available."""
|
||||
required_tools = []
|
||||
|
||||
|
||||
# Check for 7zip equivalent (p7zip on macOS/Linux)
|
||||
if not shutil.which("7z") and not shutil.which("7za"):
|
||||
logger.error("7zip not found. Install with: brew install p7zip (macOS) or apt install p7zip-full (Ubuntu)")
|
||||
logger.error(
|
||||
"7zip not found. Install with: brew install p7zip (macOS) or apt install p7zip-full (Ubuntu)"
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
# Check for xorriso equivalent
|
||||
if not shutil.which("xorriso") and not shutil.which("mkisofs") and not shutil.which("hdiutil"):
|
||||
logger.error("No ISO creation tool found. Install xorriso, mkisofs, or use macOS hdiutil")
|
||||
if (
|
||||
not shutil.which("xorriso")
|
||||
and not shutil.which("mkisofs")
|
||||
and not shutil.which("hdiutil")
|
||||
):
|
||||
logger.error(
|
||||
"No ISO creation tool found. Install xorriso, mkisofs, or use macOS hdiutil"
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def download_ubuntu_iso(self, version: str = "24.04") -> Path:
|
||||
"""Download Ubuntu ISO if not already present, trying multiple mirrors."""
|
||||
iso_filename = f"ubuntu-{version}-live-server-amd64.iso"
|
||||
iso_path = self.work_dir / iso_filename
|
||||
|
||||
|
||||
if iso_path.exists():
|
||||
logger.info(f"Ubuntu ISO already exists: {iso_path}")
|
||||
return iso_path
|
||||
|
||||
|
||||
if version == "24.04":
|
||||
iso_subpath = UBUNTU_24_04_ISO
|
||||
elif version == "22.04":
|
||||
iso_subpath = UBUNTU_22_04_ISO
|
||||
else:
|
||||
raise ValueError(f"Unsupported Ubuntu version: {version}")
|
||||
|
||||
|
||||
# Try each mirror until one works
|
||||
last_error = None
|
||||
for mirror in UBUNTU_MIRRORS:
|
||||
iso_url = f"{mirror}/{iso_subpath}"
|
||||
logger.info(f"Trying to download Ubuntu {version} ISO from {iso_url}")
|
||||
|
||||
|
||||
try:
|
||||
# Try downloading from this mirror
|
||||
urllib.request.urlretrieve(iso_url, iso_path)
|
||||
logger.info(f"✅ Ubuntu ISO downloaded successfully from {mirror}: {iso_path}")
|
||||
logger.info(
|
||||
f"✅ Ubuntu ISO downloaded successfully from {mirror}: {iso_path}"
|
||||
)
|
||||
return iso_path
|
||||
except Exception as e:
|
||||
last_error = e
|
||||
@@ -128,27 +142,37 @@ class UbuntuISOBuilder:
|
||||
if iso_path.exists():
|
||||
iso_path.unlink()
|
||||
continue
|
||||
|
||||
|
||||
# If we get here, all mirrors failed
|
||||
logger.error(f"Failed to download Ubuntu ISO from all mirrors. Last error: {last_error}")
|
||||
logger.error(
|
||||
f"Failed to download Ubuntu ISO from all mirrors. Last error: {last_error}"
|
||||
)
|
||||
raise last_error
|
||||
|
||||
|
||||
def extract_iso(self, iso_path: Path) -> bool:
|
||||
"""Extract Ubuntu ISO following the guide."""
|
||||
logger.info(f"Extracting ISO: {iso_path}")
|
||||
|
||||
|
||||
# Use 7z to extract ISO
|
||||
seven_zip_cmd = "7z" if shutil.which("7z") else "7za"
|
||||
|
||||
|
||||
try:
|
||||
# Extract ISO: 7z -y x ubuntu.iso -osource-files
|
||||
result = subprocess.run([
|
||||
seven_zip_cmd, "-y", "x", str(iso_path),
|
||||
f"-o{self.source_files_dir}"
|
||||
], capture_output=True, text=True, check=True)
|
||||
|
||||
subprocess.run(
|
||||
[
|
||||
seven_zip_cmd,
|
||||
"-y",
|
||||
"x",
|
||||
str(iso_path),
|
||||
f"-o{self.source_files_dir}",
|
||||
],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
logger.info("ISO extracted successfully")
|
||||
|
||||
|
||||
# Move [BOOT] directory as per guide: mv '[BOOT]' ../BOOT
|
||||
boot_source = self.source_files_dir / "[BOOT]"
|
||||
if boot_source.exists():
|
||||
@@ -156,249 +180,304 @@ class UbuntuISOBuilder:
|
||||
logger.info(f"Moved [BOOT] directory to {self.boot_dir}")
|
||||
else:
|
||||
logger.warning("[BOOT] directory not found in extracted files")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
logger.error(f"Failed to extract ISO: {e.stderr}")
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Error extracting ISO: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def modify_grub_config(self) -> bool:
|
||||
"""Modify GRUB configuration to add autoinstall menu entry."""
|
||||
logger.info("Modifying GRUB configuration...")
|
||||
|
||||
|
||||
if not self.grub_cfg_path.exists():
|
||||
logger.error(f"GRUB config not found: {self.grub_cfg_path}")
|
||||
return False
|
||||
|
||||
|
||||
try:
|
||||
# Read existing GRUB config
|
||||
with open(self.grub_cfg_path, 'r', encoding='utf-8') as f:
|
||||
with open(self.grub_cfg_path, "r", encoding="utf-8") as f:
|
||||
grub_content = f.read()
|
||||
|
||||
|
||||
# Autoinstall menu entry as per guide
|
||||
autoinstall_entry = '''menuentry "Autoinstall Ubuntu Server" {
|
||||
autoinstall_entry = """menuentry "Autoinstall Ubuntu Server" {
|
||||
set gfxpayload=keep
|
||||
linux /casper/vmlinuz quiet autoinstall ds=nocloud\\;s=/cdrom/server/ ---
|
||||
initrd /casper/initrd
|
||||
}
|
||||
|
||||
'''
|
||||
|
||||
"""
|
||||
|
||||
# Insert autoinstall entry at the beginning of menu entries
|
||||
# Find the first menuentry and insert before it
|
||||
import re
|
||||
|
||||
first_menu_match = re.search(r'(menuentry\s+["\'])', grub_content)
|
||||
if first_menu_match:
|
||||
insert_pos = first_menu_match.start()
|
||||
modified_content = (
|
||||
grub_content[:insert_pos] +
|
||||
autoinstall_entry +
|
||||
grub_content[insert_pos:]
|
||||
grub_content[:insert_pos]
|
||||
+ autoinstall_entry
|
||||
+ grub_content[insert_pos:]
|
||||
)
|
||||
else:
|
||||
# Fallback: append at the end
|
||||
modified_content = grub_content + "\n" + autoinstall_entry
|
||||
|
||||
|
||||
# Write modified GRUB config
|
||||
with open(self.grub_cfg_path, 'w', encoding='utf-8') as f:
|
||||
with open(self.grub_cfg_path, "w", encoding="utf-8") as f:
|
||||
f.write(modified_content)
|
||||
|
||||
|
||||
logger.info("GRUB configuration modified successfully")
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to modify GRUB config: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def create_autoinstall_config(self, user_data: str) -> bool:
|
||||
"""Create autoinstall configuration in server/ directory."""
|
||||
logger.info("Creating autoinstall configuration...")
|
||||
|
||||
|
||||
try:
|
||||
# Create server directory
|
||||
self.server_dir.mkdir(exist_ok=True, parents=True)
|
||||
|
||||
|
||||
# Create empty meta-data file (as per guide)
|
||||
meta_data_path = self.server_dir / "meta-data"
|
||||
meta_data_path.touch()
|
||||
logger.info(f"Created empty meta-data: {meta_data_path}")
|
||||
|
||||
|
||||
# Create user-data file with autoinstall configuration
|
||||
user_data_path = self.server_dir / "user-data"
|
||||
with open(user_data_path, 'w', encoding='utf-8') as f:
|
||||
with open(user_data_path, "w", encoding="utf-8") as f:
|
||||
f.write(user_data)
|
||||
logger.info(f"Created user-data: {user_data_path}")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to create autoinstall config: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def rebuild_iso(self, output_path: Path) -> bool:
|
||||
"""Rebuild ISO with autoinstall configuration using xorriso."""
|
||||
logger.info(f"Rebuilding ISO: {output_path}")
|
||||
|
||||
|
||||
try:
|
||||
# Change to source-files directory for xorriso command
|
||||
original_cwd = os.getcwd()
|
||||
os.chdir(self.source_files_dir)
|
||||
|
||||
|
||||
# Remove existing output file
|
||||
if output_path.exists():
|
||||
output_path.unlink()
|
||||
|
||||
|
||||
# Try different ISO creation methods in order of preference
|
||||
success = False
|
||||
|
||||
|
||||
# Method 1: xorriso (most complete)
|
||||
if shutil.which("xorriso") and not success:
|
||||
try:
|
||||
logger.info("Trying xorriso method...")
|
||||
cmd = [
|
||||
"xorriso", "-as", "mkisofs", "-r",
|
||||
"-V", f"Ubuntu 24.04 LTS AUTO (EFIBIOS)",
|
||||
"-o", str(output_path),
|
||||
"--grub2-mbr", f"..{os.sep}BOOT{os.sep}1-Boot-NoEmul.img",
|
||||
"-partition_offset", "16",
|
||||
"xorriso",
|
||||
"-as",
|
||||
"mkisofs",
|
||||
"-r",
|
||||
"-V",
|
||||
f"Ubuntu 24.04 LTS AUTO (EFIBIOS)",
|
||||
"-o",
|
||||
str(output_path),
|
||||
"--grub2-mbr",
|
||||
f"..{os.sep}BOOT{os.sep}1-Boot-NoEmul.img",
|
||||
"-partition_offset",
|
||||
"16",
|
||||
"--mbr-force-bootable",
|
||||
"-append_partition", "2", "28732ac11ff8d211ba4b00a0c93ec93b",
|
||||
"-append_partition",
|
||||
"2",
|
||||
"28732ac11ff8d211ba4b00a0c93ec93b",
|
||||
f"..{os.sep}BOOT{os.sep}2-Boot-NoEmul.img",
|
||||
"-appended_part_as_gpt",
|
||||
"-iso_mbr_part_type", "a2a0d0ebe5b9334487c068b6b72699c7",
|
||||
"-c", "/boot.catalog",
|
||||
"-b", "/boot/grub/i386-pc/eltorito.img",
|
||||
"-no-emul-boot", "-boot-load-size", "4", "-boot-info-table", "--grub2-boot-info",
|
||||
"-eltorito-alt-boot",
|
||||
"-e", "--interval:appended_partition_2:::",
|
||||
"-iso_mbr_part_type",
|
||||
"a2a0d0ebe5b9334487c068b6b72699c7",
|
||||
"-c",
|
||||
"/boot.catalog",
|
||||
"-b",
|
||||
"/boot/grub/i386-pc/eltorito.img",
|
||||
"-no-emul-boot",
|
||||
"."
|
||||
"-boot-load-size",
|
||||
"4",
|
||||
"-boot-info-table",
|
||||
"--grub2-boot-info",
|
||||
"-eltorito-alt-boot",
|
||||
"-e",
|
||||
"--interval:appended_partition_2:::",
|
||||
"-no-emul-boot",
|
||||
".",
|
||||
]
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
success = True
|
||||
logger.info("✅ ISO created with xorriso")
|
||||
except subprocess.CalledProcessError as e:
|
||||
logger.warning(f"xorriso failed: {e.stderr}")
|
||||
if output_path.exists():
|
||||
output_path.unlink()
|
||||
|
||||
|
||||
# Method 2: mkisofs with joliet-long
|
||||
if shutil.which("mkisofs") and not success:
|
||||
try:
|
||||
logger.info("Trying mkisofs with joliet-long...")
|
||||
cmd = [
|
||||
"mkisofs", "-r", "-V", f"Ubuntu 24.04 LTS AUTO",
|
||||
"-cache-inodes", "-J", "-joliet-long", "-l",
|
||||
"-b", "boot/grub/i386-pc/eltorito.img",
|
||||
"-c", "boot.catalog",
|
||||
"-no-emul-boot", "-boot-load-size", "4", "-boot-info-table",
|
||||
"-o", str(output_path),
|
||||
"."
|
||||
"mkisofs",
|
||||
"-r",
|
||||
"-V",
|
||||
f"Ubuntu 24.04 LTS AUTO",
|
||||
"-cache-inodes",
|
||||
"-J",
|
||||
"-joliet-long",
|
||||
"-l",
|
||||
"-b",
|
||||
"boot/grub/i386-pc/eltorito.img",
|
||||
"-c",
|
||||
"boot.catalog",
|
||||
"-no-emul-boot",
|
||||
"-boot-load-size",
|
||||
"4",
|
||||
"-boot-info-table",
|
||||
"-o",
|
||||
str(output_path),
|
||||
".",
|
||||
]
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
success = True
|
||||
logger.info("✅ ISO created with mkisofs (joliet-long)")
|
||||
except subprocess.CalledProcessError as e:
|
||||
logger.warning(f"mkisofs with joliet-long failed: {e.stderr}")
|
||||
if output_path.exists():
|
||||
output_path.unlink()
|
||||
|
||||
|
||||
# Method 3: mkisofs without Joliet (fallback)
|
||||
if shutil.which("mkisofs") and not success:
|
||||
try:
|
||||
logger.info("Trying mkisofs without Joliet (fallback)...")
|
||||
cmd = [
|
||||
"mkisofs", "-r", "-V", f"Ubuntu 24.04 LTS AUTO",
|
||||
"-cache-inodes", "-l", # No -J (Joliet) to avoid filename conflicts
|
||||
"-b", "boot/grub/i386-pc/eltorito.img",
|
||||
"-c", "boot.catalog",
|
||||
"-no-emul-boot", "-boot-load-size", "4", "-boot-info-table",
|
||||
"-o", str(output_path),
|
||||
"."
|
||||
"mkisofs",
|
||||
"-r",
|
||||
"-V",
|
||||
f"Ubuntu 24.04 LTS AUTO",
|
||||
"-cache-inodes",
|
||||
"-l", # No -J (Joliet) to avoid filename conflicts
|
||||
"-b",
|
||||
"boot/grub/i386-pc/eltorito.img",
|
||||
"-c",
|
||||
"boot.catalog",
|
||||
"-no-emul-boot",
|
||||
"-boot-load-size",
|
||||
"4",
|
||||
"-boot-info-table",
|
||||
"-o",
|
||||
str(output_path),
|
||||
".",
|
||||
]
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
success = True
|
||||
logger.info("✅ ISO created with mkisofs (no Joliet)")
|
||||
except subprocess.CalledProcessError as e:
|
||||
logger.warning(f"mkisofs without Joliet failed: {e.stderr}")
|
||||
logger.warning(
|
||||
f"mkisofs without Joliet failed: {
|
||||
e.stderr}"
|
||||
)
|
||||
if output_path.exists():
|
||||
output_path.unlink()
|
||||
|
||||
|
||||
# Method 4: macOS hdiutil
|
||||
if shutil.which("hdiutil") and not success:
|
||||
try:
|
||||
logger.info("Trying hdiutil (macOS)...")
|
||||
cmd = [
|
||||
"hdiutil", "makehybrid", "-iso", "-joliet", "-o", str(output_path), "."
|
||||
"hdiutil",
|
||||
"makehybrid",
|
||||
"-iso",
|
||||
"-joliet",
|
||||
"-o",
|
||||
str(output_path),
|
||||
".",
|
||||
]
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
success = True
|
||||
logger.info("✅ ISO created with hdiutil")
|
||||
except subprocess.CalledProcessError as e:
|
||||
logger.warning(f"hdiutil failed: {e.stderr}")
|
||||
if output_path.exists():
|
||||
output_path.unlink()
|
||||
|
||||
|
||||
if not success:
|
||||
logger.error("All ISO creation methods failed")
|
||||
return False
|
||||
|
||||
|
||||
# Verify the output file was created
|
||||
if not output_path.exists():
|
||||
logger.error("ISO file was not created despite success message")
|
||||
return False
|
||||
|
||||
|
||||
logger.info(f"ISO rebuilt successfully: {output_path}")
|
||||
logger.info(f"ISO size: {output_path.stat().st_size / (1024*1024):.1f} MB")
|
||||
logger.info(
|
||||
f"ISO size: {output_path.stat().st_size / (1024 * 1024):.1f} MB"
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error rebuilding ISO: {e}")
|
||||
return False
|
||||
finally:
|
||||
# Return to original directory
|
||||
os.chdir(original_cwd)
|
||||
|
||||
def build_autoinstall_iso(self, user_data: str, output_path: Path, ubuntu_version: str = "24.04") -> bool:
|
||||
|
||||
def build_autoinstall_iso(
|
||||
self, user_data: str, output_path: Path, ubuntu_version: str = "24.04"
|
||||
) -> bool:
|
||||
"""Complete ISO build process following the Ubuntu autoinstall guide."""
|
||||
logger.info(f"🚀 Starting Ubuntu {ubuntu_version} autoinstall ISO build process")
|
||||
|
||||
logger.info(
|
||||
f"🚀 Starting Ubuntu {ubuntu_version} autoinstall ISO build process"
|
||||
)
|
||||
|
||||
try:
|
||||
# Step 1: Check tools
|
||||
if not self.check_tools():
|
||||
return False
|
||||
|
||||
|
||||
# Step 2: Download Ubuntu ISO
|
||||
iso_path = self.download_ubuntu_iso(ubuntu_version)
|
||||
|
||||
# Step 3: Extract ISO
|
||||
|
||||
# Step 3: Extract ISO
|
||||
if not self.extract_iso(iso_path):
|
||||
return False
|
||||
|
||||
|
||||
# Step 4: Modify GRUB
|
||||
if not self.modify_grub_config():
|
||||
return False
|
||||
|
||||
|
||||
# Step 5: Create autoinstall config
|
||||
if not self.create_autoinstall_config(user_data):
|
||||
return False
|
||||
|
||||
|
||||
# Step 6: Rebuild ISO
|
||||
if not self.rebuild_iso(output_path):
|
||||
return False
|
||||
|
||||
|
||||
logger.info(f"🎉 Successfully created autoinstall ISO: {output_path}")
|
||||
logger.info(f"📁 Work directory: {self.work_dir}")
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to build autoinstall ISO: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def cleanup(self):
|
||||
"""Clean up temporary work directory."""
|
||||
if self.work_dir.exists():
|
||||
@@ -409,8 +488,9 @@ class UbuntuISOBuilder:
|
||||
def main():
|
||||
"""Test the ISO builder."""
|
||||
import logging
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
|
||||
# Sample autoinstall user-data
|
||||
user_data = """#cloud-config
|
||||
autoinstall:
|
||||
@@ -433,16 +513,16 @@ autoinstall:
|
||||
late-commands:
|
||||
- curtin in-target -- apt-get autoremove -y
|
||||
"""
|
||||
|
||||
|
||||
builder = UbuntuISOBuilder("test-vm")
|
||||
output_path = Path("/tmp/ubuntu-24.04-autoinstall.iso")
|
||||
|
||||
|
||||
success = builder.build_autoinstall_iso(user_data, output_path)
|
||||
if success:
|
||||
print(f"✅ ISO created: {output_path}")
|
||||
else:
|
||||
print("❌ ISO creation failed")
|
||||
|
||||
|
||||
# Optionally clean up
|
||||
# builder.cleanup()
|
||||
|
||||
|
||||
@@ -3,79 +3,79 @@
|
||||
Unraid VM Manager for ThrillWiki - Main Orchestrator
|
||||
Follows the Ubuntu autoinstall guide exactly:
|
||||
1. Creates modified Ubuntu ISO with autoinstall configuration
|
||||
2. Manages VM lifecycle on Unraid server
|
||||
2. Manages VM lifecycle on Unraid server
|
||||
3. Handles ThrillWiki deployment automation
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
# Import our modular components
|
||||
from iso_builder import UbuntuISOBuilder
|
||||
from vm_manager import UnraidVMManager
|
||||
|
||||
# Configuration
|
||||
UNRAID_HOST = os***REMOVED***iron.get("UNRAID_HOST", "localhost")
|
||||
UNRAID_USER = os***REMOVED***iron.get("UNRAID_USER", "root")
|
||||
VM_NAME = os***REMOVED***iron.get("VM_NAME", "thrillwiki-vm")
|
||||
VM_MEMORY = int(os***REMOVED***iron.get("VM_MEMORY", 4096)) # MB
|
||||
VM_VCPUS = int(os***REMOVED***iron.get("VM_VCPUS", 2))
|
||||
VM_DISK_SIZE = int(os***REMOVED***iron.get("VM_DISK_SIZE", 50)) # GB
|
||||
SSH_PUBLIC_KEY = os***REMOVED***iron.get("SSH_PUBLIC_KEY", "")
|
||||
UNRAID_HOST = os.environ.get("UNRAID_HOST", "localhost")
|
||||
UNRAID_USER = os.environ.get("UNRAID_USER", "root")
|
||||
VM_NAME = os.environ.get("VM_NAME", "thrillwiki-vm")
|
||||
VM_MEMORY = int(os.environ.get("VM_MEMORY", 4096)) # MB
|
||||
VM_VCPUS = int(os.environ.get("VM_VCPUS", 2))
|
||||
VM_DISK_SIZE = int(os.environ.get("VM_DISK_SIZE", 50)) # GB
|
||||
SSH_PUBLIC_KEY = os.environ.get("SSH_PUBLIC_KEY", "")
|
||||
|
||||
# Network Configuration
|
||||
VM_IP = os***REMOVED***iron.get("VM_IP", "dhcp")
|
||||
VM_GATEWAY = os***REMOVED***iron.get("VM_GATEWAY", "192.168.20.1")
|
||||
VM_NETMASK = os***REMOVED***iron.get("VM_NETMASK", "255.255.255.0")
|
||||
VM_NETWORK = os***REMOVED***iron.get("VM_NETWORK", "192.168.20.0/24")
|
||||
VM_IP = os.environ.get("VM_IP", "dhcp")
|
||||
VM_GATEWAY = os.environ.get("VM_GATEWAY", "192.168.20.1")
|
||||
VM_NETMASK = os.environ.get("VM_NETMASK", "255.255.255.0")
|
||||
VM_NETWORK = os.environ.get("VM_NETWORK", "192.168.20.0/24")
|
||||
|
||||
# GitHub Configuration
|
||||
REPO_URL = os***REMOVED***iron.get("REPO_URL", "")
|
||||
GITHUB_USERNAME = os***REMOVED***iron.get("GITHUB_USERNAME", "")
|
||||
GITHUB_TOKEN = os***REMOVED***iron.get("GITHUB_TOKEN", "")
|
||||
REPO_URL = os.environ.get("REPO_URL", "")
|
||||
GITHUB_USERNAME = os.environ.get("GITHUB_USERNAME", "")
|
||||
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN", "")
|
||||
|
||||
# Ubuntu version preference
|
||||
UBUNTU_VERSION = os***REMOVED***iron.get("UBUNTU_VERSION", "24.04")
|
||||
UBUNTU_VERSION = os.environ.get("UBUNTU_VERSION", "24.04")
|
||||
|
||||
# Setup logging
|
||||
os.makedirs("logs", exist_ok=True)
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s - %(levelname)s - %(message)s",
|
||||
handlers=[logging.FileHandler("logs/unraid-vm.log"), logging.StreamHandler()],
|
||||
handlers=[
|
||||
logging.FileHandler("logs/unraid-vm.log"),
|
||||
logging.StreamHandler(),
|
||||
],
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ThrillWikiVMOrchestrator:
|
||||
"""Main orchestrator for ThrillWiki VM deployment."""
|
||||
|
||||
|
||||
def __init__(self):
|
||||
self.vm_manager = UnraidVMManager(VM_NAME, UNRAID_HOST, UNRAID_USER)
|
||||
self.iso_builder = None
|
||||
|
||||
|
||||
def create_autoinstall_user_data(self) -> str:
|
||||
"""Create autoinstall user-data configuration."""
|
||||
# Read autoinstall template
|
||||
template_path = Path(__file__).parent / "autoinstall-user-data.yaml"
|
||||
if not template_path.exists():
|
||||
raise FileNotFoundError(f"Autoinstall template not found: {template_path}")
|
||||
|
||||
with open(template_path, 'r', encoding='utf-8') as f:
|
||||
|
||||
with open(template_path, "r", encoding="utf-8") as f:
|
||||
template = f.read()
|
||||
|
||||
# Replace placeholders using string replacement (avoiding .format() due to curly braces in YAML)
|
||||
|
||||
# Replace placeholders using string replacement (avoiding .format() due
|
||||
# to curly braces in YAML)
|
||||
user_data = template.replace(
|
||||
"{SSH_PUBLIC_KEY}", SSH_PUBLIC_KEY if SSH_PUBLIC_KEY else "# No SSH key provided"
|
||||
).replace(
|
||||
"{GITHUB_REPO}", REPO_URL if REPO_URL else ""
|
||||
)
|
||||
|
||||
"{SSH_PUBLIC_KEY}",
|
||||
SSH_PUBLIC_KEY if SSH_PUBLIC_KEY else "# No SSH key provided",
|
||||
).replace("{GITHUB_REPO}", REPO_URL if REPO_URL else "")
|
||||
|
||||
# Update network configuration based on VM_IP setting
|
||||
if VM_IP.lower() == "dhcp":
|
||||
# Keep DHCP configuration as-is
|
||||
@@ -91,74 +91,74 @@ class ThrillWikiVMOrchestrator:
|
||||
- 8.8.8.8
|
||||
- 8.8.4.4"""
|
||||
user_data = user_data.replace("dhcp4: true", network_config)
|
||||
|
||||
|
||||
return user_data
|
||||
|
||||
|
||||
def build_autoinstall_iso(self) -> Path:
|
||||
"""Build Ubuntu autoinstall ISO following the guide."""
|
||||
logger.info("🔨 Building Ubuntu autoinstall ISO...")
|
||||
|
||||
|
||||
# Create ISO builder
|
||||
self.iso_builder = UbuntuISOBuilder(VM_NAME)
|
||||
|
||||
|
||||
# Create user-data configuration
|
||||
user_data = self.create_autoinstall_user_data()
|
||||
|
||||
|
||||
# Build autoinstall ISO
|
||||
iso_output_path = Path(f"/tmp/{VM_NAME}-ubuntu-autoinstall.iso")
|
||||
|
||||
|
||||
success = self.iso_builder.build_autoinstall_iso(
|
||||
user_data=user_data,
|
||||
output_path=iso_output_path,
|
||||
ubuntu_version=UBUNTU_VERSION
|
||||
ubuntu_version=UBUNTU_VERSION,
|
||||
)
|
||||
|
||||
|
||||
if not success:
|
||||
raise RuntimeError("Failed to build autoinstall ISO")
|
||||
|
||||
|
||||
logger.info(f"✅ Autoinstall ISO built successfully: {iso_output_path}")
|
||||
return iso_output_path
|
||||
|
||||
|
||||
def deploy_vm(self) -> bool:
|
||||
"""Complete VM deployment process."""
|
||||
try:
|
||||
logger.info("🚀 Starting ThrillWiki VM deployment...")
|
||||
|
||||
|
||||
# Step 1: Check SSH connectivity
|
||||
logger.info("📡 Testing Unraid connectivity...")
|
||||
if not self.vm_manager.authenticate():
|
||||
logger.error("❌ Cannot connect to Unraid server")
|
||||
return False
|
||||
|
||||
|
||||
# Step 2: Build autoinstall ISO
|
||||
logger.info("🔨 Building Ubuntu autoinstall ISO...")
|
||||
iso_path = self.build_autoinstall_iso()
|
||||
|
||||
|
||||
# Step 3: Upload ISO to Unraid
|
||||
logger.info("📤 Uploading autoinstall ISO to Unraid...")
|
||||
remote_iso_path = self.vm_manager.upload_iso_to_unraid(iso_path)
|
||||
|
||||
self.vm_manager.upload_iso_to_unraid(iso_path)
|
||||
|
||||
# Step 4: Create/update VM configuration
|
||||
logger.info("⚙️ Creating VM configuration...")
|
||||
success = self.vm_manager.create_vm(
|
||||
vm_memory=VM_MEMORY,
|
||||
vm_vcpus=VM_VCPUS,
|
||||
vm_vcpus=VM_VCPUS,
|
||||
vm_disk_size=VM_DISK_SIZE,
|
||||
vm_ip=VM_IP
|
||||
vm_ip=VM_IP,
|
||||
)
|
||||
|
||||
|
||||
if not success:
|
||||
logger.error("❌ Failed to create VM configuration")
|
||||
return False
|
||||
|
||||
|
||||
# Step 5: Start VM
|
||||
logger.info("🟢 Starting VM...")
|
||||
success = self.vm_manager.start_vm()
|
||||
|
||||
|
||||
if not success:
|
||||
logger.error("❌ Failed to start VM")
|
||||
return False
|
||||
|
||||
|
||||
logger.info("🎉 VM deployment completed successfully!")
|
||||
logger.info("")
|
||||
logger.info("📋 Next Steps:")
|
||||
@@ -167,9 +167,9 @@ class ThrillWikiVMOrchestrator:
|
||||
logger.info("3. Use 'python main.py ip' to get VM IP when ready")
|
||||
logger.info("4. SSH to VM and run /home/thrillwiki/deploy-thrillwiki.sh")
|
||||
logger.info("")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ VM deployment failed: {e}")
|
||||
return False
|
||||
@@ -177,7 +177,7 @@ class ThrillWikiVMOrchestrator:
|
||||
# Cleanup ISO builder temp files
|
||||
if self.iso_builder:
|
||||
self.iso_builder.cleanup()
|
||||
|
||||
|
||||
def get_vm_info(self) -> dict:
|
||||
"""Get VM information."""
|
||||
return {
|
||||
@@ -186,7 +186,7 @@ class ThrillWikiVMOrchestrator:
|
||||
"ip": self.vm_manager.get_vm_ip(),
|
||||
"memory": VM_MEMORY,
|
||||
"vcpus": VM_VCPUS,
|
||||
"disk_size": VM_DISK_SIZE
|
||||
"disk_size": VM_DISK_SIZE,
|
||||
}
|
||||
|
||||
|
||||
@@ -204,17 +204,26 @@ Examples:
|
||||
python main.py status # Get VM status
|
||||
python main.py delete # Remove VM completely
|
||||
""",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
)
|
||||
|
||||
|
||||
parser.add_argument(
|
||||
"action",
|
||||
choices=["setup", "create", "start", "stop", "status", "ip", "delete", "info"],
|
||||
help="Action to perform"
|
||||
choices=[
|
||||
"setup",
|
||||
"create",
|
||||
"start",
|
||||
"stop",
|
||||
"status",
|
||||
"ip",
|
||||
"delete",
|
||||
"info",
|
||||
],
|
||||
help="Action to perform",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
# Create orchestrator
|
||||
orchestrator = ThrillWikiVMOrchestrator()
|
||||
|
||||
@@ -225,7 +234,9 @@ Examples:
|
||||
|
||||
elif args.action == "create":
|
||||
logger.info("⚙️ Creating VM configuration...")
|
||||
success = orchestrator.vm_manager.create_vm(VM_MEMORY, VM_VCPUS, VM_DISK_SIZE, VM_IP)
|
||||
success = orchestrator.vm_manager.create_vm(
|
||||
VM_MEMORY, VM_VCPUS, VM_DISK_SIZE, VM_IP
|
||||
)
|
||||
sys.exit(0 if success else 1)
|
||||
|
||||
elif args.action == "start":
|
||||
@@ -248,7 +259,9 @@ Examples:
|
||||
if ip:
|
||||
print(f"VM IP: {ip}")
|
||||
print(f"SSH: ssh thrillwiki@{ip}")
|
||||
print(f"Deploy: ssh thrillwiki@{ip} '/home/thrillwiki/deploy-thrillwiki.sh'")
|
||||
print(
|
||||
f"Deploy: ssh thrillwiki@{ip} '/home/thrillwiki/deploy-thrillwiki.sh'"
|
||||
)
|
||||
sys.exit(0)
|
||||
else:
|
||||
print("❌ Failed to get VM IP (VM may not be ready yet)")
|
||||
|
||||
@@ -6,11 +6,8 @@ Uses pre-built template VMs for fast deployment instead of autoinstall.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
# Import our modular components
|
||||
from template_manager import TemplateVMManager
|
||||
@@ -19,90 +16,92 @@ from vm_manager_template import UnraidTemplateVMManager
|
||||
|
||||
class ConfigLoader:
|
||||
"""Dynamic configuration loader that reads environment variables when needed."""
|
||||
|
||||
|
||||
def __init__(self):
|
||||
# Try to load ***REMOVED***.unraid if it exists to ensure we have the latest config
|
||||
# Try to load ***REMOVED***.unraid if it exists to ensure we have the
|
||||
# latest config
|
||||
self._load_env_file()
|
||||
|
||||
|
||||
def _load_env_file(self):
|
||||
"""Load ***REMOVED***.unraid file if it exists."""
|
||||
# Find the project directory (two levels up from this script)
|
||||
script_dir = Path(__file__).parent
|
||||
project_dir = script_dir.parent.parent
|
||||
env_file = project_dir / "***REMOVED***.unraid"
|
||||
|
||||
|
||||
if env_file.exists():
|
||||
try:
|
||||
with open(env_file, 'r') as f:
|
||||
with open(env_file, "r") as f:
|
||||
for line in f:
|
||||
line = line.strip()
|
||||
if line and not line.startswith('#') and '=' in line:
|
||||
key, value = line.split('=', 1)
|
||||
if line and not line.startswith("#") and "=" in line:
|
||||
key, value = line.split("=", 1)
|
||||
# Remove quotes if present
|
||||
value = value.strip('"\'')
|
||||
# Only set if not already in environment (env vars take precedence)
|
||||
if key not in os***REMOVED***iron:
|
||||
os***REMOVED***iron[key] = value
|
||||
|
||||
value = value.strip("\"'")
|
||||
# Only set if not already in environment (env vars
|
||||
# take precedence)
|
||||
if key not in os.environ:
|
||||
os.environ[key] = value
|
||||
|
||||
logging.info(f"📝 Loaded configuration from {env_file}")
|
||||
except Exception as e:
|
||||
logging.warning(f"⚠️ Could not load ***REMOVED***.unraid: {e}")
|
||||
|
||||
|
||||
@property
|
||||
def UNRAID_HOST(self):
|
||||
return os***REMOVED***iron.get("UNRAID_HOST", "localhost")
|
||||
|
||||
return os.environ.get("UNRAID_HOST", "localhost")
|
||||
|
||||
@property
|
||||
def UNRAID_USER(self):
|
||||
return os***REMOVED***iron.get("UNRAID_USER", "root")
|
||||
|
||||
return os.environ.get("UNRAID_USER", "root")
|
||||
|
||||
@property
|
||||
def VM_NAME(self):
|
||||
return os***REMOVED***iron.get("VM_NAME", "thrillwiki-vm")
|
||||
|
||||
return os.environ.get("VM_NAME", "thrillwiki-vm")
|
||||
|
||||
@property
|
||||
def VM_MEMORY(self):
|
||||
return int(os***REMOVED***iron.get("VM_MEMORY", 4096))
|
||||
|
||||
return int(os.environ.get("VM_MEMORY", 4096))
|
||||
|
||||
@property
|
||||
def VM_VCPUS(self):
|
||||
return int(os***REMOVED***iron.get("VM_VCPUS", 2))
|
||||
|
||||
return int(os.environ.get("VM_VCPUS", 2))
|
||||
|
||||
@property
|
||||
def VM_DISK_SIZE(self):
|
||||
return int(os***REMOVED***iron.get("VM_DISK_SIZE", 50))
|
||||
|
||||
return int(os.environ.get("VM_DISK_SIZE", 50))
|
||||
|
||||
@property
|
||||
def SSH_PUBLIC_KEY(self):
|
||||
return os***REMOVED***iron.get("SSH_PUBLIC_KEY", "")
|
||||
|
||||
return os.environ.get("SSH_PUBLIC_KEY", "")
|
||||
|
||||
@property
|
||||
def VM_IP(self):
|
||||
return os***REMOVED***iron.get("VM_IP", "dhcp")
|
||||
|
||||
return os.environ.get("VM_IP", "dhcp")
|
||||
|
||||
@property
|
||||
def VM_GATEWAY(self):
|
||||
return os***REMOVED***iron.get("VM_GATEWAY", "192.168.20.1")
|
||||
|
||||
return os.environ.get("VM_GATEWAY", "192.168.20.1")
|
||||
|
||||
@property
|
||||
def VM_NETMASK(self):
|
||||
return os***REMOVED***iron.get("VM_NETMASK", "255.255.255.0")
|
||||
|
||||
return os.environ.get("VM_NETMASK", "255.255.255.0")
|
||||
|
||||
@property
|
||||
def VM_NETWORK(self):
|
||||
return os***REMOVED***iron.get("VM_NETWORK", "192.168.20.0/24")
|
||||
|
||||
return os.environ.get("VM_NETWORK", "192.168.20.0/24")
|
||||
|
||||
@property
|
||||
def REPO_URL(self):
|
||||
return os***REMOVED***iron.get("REPO_URL", "")
|
||||
|
||||
return os.environ.get("REPO_URL", "")
|
||||
|
||||
@property
|
||||
def GITHUB_USERNAME(self):
|
||||
return os***REMOVED***iron.get("GITHUB_USERNAME", "")
|
||||
|
||||
return os.environ.get("GITHUB_USERNAME", "")
|
||||
|
||||
@property
|
||||
def GITHUB_TOKEN(self):
|
||||
return os***REMOVED***iron.get("GITHUB_TOKEN", "")
|
||||
return os.environ.get("GITHUB_TOKEN", "")
|
||||
|
||||
|
||||
# Create a global configuration instance
|
||||
@@ -114,14 +113,18 @@ os.makedirs("logs", exist_ok=True)
|
||||
# Configure console handler with line buffering
|
||||
console_handler = logging.StreamHandler(sys.stdout)
|
||||
console_handler.setLevel(logging.INFO)
|
||||
console_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
|
||||
console_handler.setFormatter(
|
||||
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
|
||||
)
|
||||
# Force flush after each log message
|
||||
console_handler.flush = lambda: sys.stdout.flush()
|
||||
|
||||
# Configure file handler
|
||||
file_handler = logging.FileHandler("logs/unraid-vm.log")
|
||||
file_handler.setLevel(logging.INFO)
|
||||
file_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
|
||||
file_handler.setFormatter(
|
||||
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
|
||||
)
|
||||
|
||||
# Set up basic config with both handlers
|
||||
logging.basicConfig(
|
||||
@@ -136,76 +139,91 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class ThrillWikiTemplateVMOrchestrator:
|
||||
"""Main orchestrator for template-based ThrillWiki VM deployment."""
|
||||
|
||||
|
||||
def __init__(self):
|
||||
# Log current configuration for debugging
|
||||
logger.info(f"🔧 Using configuration: UNRAID_HOST={config.UNRAID_HOST}, UNRAID_USER={config.UNRAID_USER}, VM_NAME={config.VM_NAME}")
|
||||
|
||||
self.template_manager = TemplateVMManager(config.UNRAID_HOST, config.UNRAID_USER)
|
||||
self.vm_manager = UnraidTemplateVMManager(config.VM_NAME, config.UNRAID_HOST, config.UNRAID_USER)
|
||||
|
||||
logger.info(
|
||||
f"🔧 Using configuration: UNRAID_HOST={
|
||||
config.UNRAID_HOST}, UNRAID_USER={
|
||||
config.UNRAID_USER}, VM_NAME={
|
||||
config.VM_NAME}"
|
||||
)
|
||||
|
||||
self.template_manager = TemplateVMManager(
|
||||
config.UNRAID_HOST, config.UNRAID_USER
|
||||
)
|
||||
self.vm_manager = UnraidTemplateVMManager(
|
||||
config.VM_NAME, config.UNRAID_HOST, config.UNRAID_USER
|
||||
)
|
||||
|
||||
def check_template_ready(self) -> bool:
|
||||
"""Check if template VM is ready for use."""
|
||||
logger.info("🔍 Checking template VM availability...")
|
||||
|
||||
|
||||
if not self.template_manager.check_template_exists():
|
||||
logger.error("❌ Template VM disk not found!")
|
||||
logger.error("Please ensure 'thrillwiki-template-ubuntu' VM exists and is properly configured")
|
||||
logger.error("Template should be located at: /mnt/user/domains/thrillwiki-template-ubuntu/vdisk1.qcow2")
|
||||
logger.error(
|
||||
"Please ensure 'thrillwiki-template-ubuntu' VM exists and is properly configured"
|
||||
)
|
||||
logger.error(
|
||||
"Template should be located at: /mnt/user/domains/thrillwiki-template-ubuntu/vdisk1.qcow2"
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
# Check template status
|
||||
if not self.template_manager.update_template():
|
||||
logger.warning("⚠️ Template VM may be running - this could cause issues")
|
||||
logger.warning("Ensure the template VM is stopped before creating new instances")
|
||||
|
||||
logger.warning(
|
||||
"Ensure the template VM is stopped before creating new instances"
|
||||
)
|
||||
|
||||
info = self.template_manager.get_template_info()
|
||||
if info:
|
||||
logger.info(f"📋 Template Info:")
|
||||
logger.info(f" Virtual Size: {info['virtual_size']}")
|
||||
logger.info(f" File Size: {info['file_size']}")
|
||||
logger.info(f" Last Modified: {info['last_modified']}")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def deploy_vm_from_template(self) -> bool:
|
||||
"""Complete template-based VM deployment process."""
|
||||
try:
|
||||
logger.info("🚀 Starting ThrillWiki template-based VM deployment...")
|
||||
|
||||
|
||||
# Step 1: Check SSH connectivity
|
||||
logger.info("📡 Testing Unraid connectivity...")
|
||||
if not self.vm_manager.authenticate():
|
||||
logger.error("❌ Cannot connect to Unraid server")
|
||||
return False
|
||||
|
||||
|
||||
# Step 2: Check template availability
|
||||
logger.info("🔍 Verifying template VM...")
|
||||
if not self.check_template_ready():
|
||||
logger.error("❌ Template VM not ready")
|
||||
return False
|
||||
|
||||
|
||||
# Step 3: Create VM from template
|
||||
logger.info("⚙️ Creating VM from template...")
|
||||
success = self.vm_manager.create_vm_from_template(
|
||||
vm_memory=config.VM_MEMORY,
|
||||
vm_vcpus=config.VM_VCPUS,
|
||||
vm_vcpus=config.VM_VCPUS,
|
||||
vm_disk_size=config.VM_DISK_SIZE,
|
||||
vm_ip=config.VM_IP
|
||||
vm_ip=config.VM_IP,
|
||||
)
|
||||
|
||||
|
||||
if not success:
|
||||
logger.error("❌ Failed to create VM from template")
|
||||
return False
|
||||
|
||||
|
||||
# Step 4: Start VM
|
||||
logger.info("🟢 Starting VM...")
|
||||
success = self.vm_manager.start_vm()
|
||||
|
||||
|
||||
if not success:
|
||||
logger.error("❌ Failed to start VM")
|
||||
return False
|
||||
|
||||
|
||||
logger.info("🎉 Template-based VM deployment completed successfully!")
|
||||
logger.info("")
|
||||
logger.info("📋 Next Steps:")
|
||||
@@ -214,44 +232,54 @@ class ThrillWikiTemplateVMOrchestrator:
|
||||
logger.info("3. Use 'python main_template.py ip' to get VM IP when ready")
|
||||
logger.info("4. SSH to VM and run deployment commands")
|
||||
logger.info("")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Template VM deployment failed: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def deploy_and_configure_thrillwiki(self) -> bool:
|
||||
"""Deploy VM from template and configure ThrillWiki."""
|
||||
try:
|
||||
logger.info("🚀 Starting complete ThrillWiki deployment from template...")
|
||||
|
||||
|
||||
# Step 1: Deploy VM from template
|
||||
if not self.deploy_vm_from_template():
|
||||
return False
|
||||
|
||||
|
||||
# Step 2: Wait for VM to be accessible and configure ThrillWiki
|
||||
if config.REPO_URL:
|
||||
logger.info("🔧 Configuring ThrillWiki on VM...")
|
||||
success = self.vm_manager.customize_vm_for_thrillwiki(config.REPO_URL, config.GITHUB_TOKEN)
|
||||
|
||||
success = self.vm_manager.customize_vm_for_thrillwiki(
|
||||
config.REPO_URL, config.GITHUB_TOKEN
|
||||
)
|
||||
|
||||
if success:
|
||||
vm_ip = self.vm_manager.get_vm_ip()
|
||||
logger.info("🎉 Complete ThrillWiki deployment successful!")
|
||||
logger.info(f"🌐 ThrillWiki is available at: http://{vm_ip}:8000")
|
||||
else:
|
||||
logger.warning("⚠️ VM deployed but ThrillWiki configuration may have failed")
|
||||
logger.info("You can manually configure ThrillWiki by SSH'ing to the VM")
|
||||
logger.warning(
|
||||
"⚠️ VM deployed but ThrillWiki configuration may have failed"
|
||||
)
|
||||
logger.info(
|
||||
"You can manually configure ThrillWiki by SSH'ing to the VM"
|
||||
)
|
||||
else:
|
||||
logger.info("📝 No repository URL provided - VM deployed but ThrillWiki not configured")
|
||||
logger.info("Set REPO_URL environment variable to auto-configure ThrillWiki")
|
||||
|
||||
logger.info(
|
||||
"📝 No repository URL provided - VM deployed but ThrillWiki not configured"
|
||||
)
|
||||
logger.info(
|
||||
"Set REPO_URL environment variable to auto-configure ThrillWiki"
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Complete deployment failed: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def get_vm_info(self) -> dict:
|
||||
"""Get VM information."""
|
||||
return {
|
||||
@@ -261,7 +289,7 @@ class ThrillWikiTemplateVMOrchestrator:
|
||||
"memory": config.VM_MEMORY,
|
||||
"vcpus": config.VM_VCPUS,
|
||||
"disk_size": config.VM_DISK_SIZE,
|
||||
"deployment_type": "template-based"
|
||||
"deployment_type": "template-based",
|
||||
}
|
||||
|
||||
|
||||
@@ -281,24 +309,35 @@ Examples:
|
||||
python main_template.py delete # Remove VM completely
|
||||
python main_template.py template # Manage template VM
|
||||
""",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
)
|
||||
|
||||
|
||||
parser.add_argument(
|
||||
"action",
|
||||
choices=["setup", "deploy", "create", "start", "stop", "status", "ip", "delete", "info", "template"],
|
||||
help="Action to perform"
|
||||
choices=[
|
||||
"setup",
|
||||
"deploy",
|
||||
"create",
|
||||
"start",
|
||||
"stop",
|
||||
"status",
|
||||
"ip",
|
||||
"delete",
|
||||
"info",
|
||||
"template",
|
||||
],
|
||||
help="Action to perform",
|
||||
)
|
||||
|
||||
|
||||
parser.add_argument(
|
||||
"template_action",
|
||||
nargs="?",
|
||||
choices=["info", "check", "update", "list"],
|
||||
help="Template management action (used with 'template' action)"
|
||||
help="Template management action (used with 'template' action)",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
# Create orchestrator
|
||||
orchestrator = ThrillWikiTemplateVMOrchestrator()
|
||||
|
||||
@@ -314,7 +353,12 @@ Examples:
|
||||
|
||||
elif args.action == "create":
|
||||
logger.info("⚙️ Creating VM from template...")
|
||||
success = orchestrator.vm_manager.create_vm_from_template(config.VM_MEMORY, config.VM_VCPUS, config.VM_DISK_SIZE, config.VM_IP)
|
||||
success = orchestrator.vm_manager.create_vm_from_template(
|
||||
config.VM_MEMORY,
|
||||
config.VM_VCPUS,
|
||||
config.VM_DISK_SIZE,
|
||||
config.VM_IP,
|
||||
)
|
||||
sys.exit(0 if success else 1)
|
||||
|
||||
elif args.action == "start":
|
||||
@@ -362,7 +406,7 @@ Examples:
|
||||
|
||||
elif args.action == "template":
|
||||
template_action = args.template_action or "info"
|
||||
|
||||
|
||||
if template_action == "info":
|
||||
logger.info("📋 Template VM Information")
|
||||
info = orchestrator.template_manager.get_template_info()
|
||||
@@ -374,7 +418,7 @@ Examples:
|
||||
else:
|
||||
print("❌ Failed to get template information")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
elif template_action == "check":
|
||||
if orchestrator.template_manager.check_template_exists():
|
||||
logger.info("✅ Template VM disk exists and is ready to use")
|
||||
@@ -382,21 +426,29 @@ Examples:
|
||||
else:
|
||||
logger.error("❌ Template VM disk not found")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
elif template_action == "update":
|
||||
success = orchestrator.template_manager.update_template()
|
||||
sys.exit(0 if success else 1)
|
||||
|
||||
|
||||
elif template_action == "list":
|
||||
logger.info("📋 Template-based VM Instances")
|
||||
instances = orchestrator.template_manager.list_template_instances()
|
||||
if instances:
|
||||
for instance in instances:
|
||||
status_emoji = "🟢" if instance["status"] == "running" else "🔴" if instance["status"] == "shut off" else "🟡"
|
||||
print(f"{status_emoji} {instance['name']} ({instance['status']})")
|
||||
status_emoji = (
|
||||
"🟢"
|
||||
if instance["status"] == "running"
|
||||
else "🔴" if instance["status"] == "shut off" else "🟡"
|
||||
)
|
||||
print(
|
||||
f"{status_emoji} {
|
||||
instance['name']} ({
|
||||
instance['status']})"
|
||||
)
|
||||
else:
|
||||
print("No template instances found")
|
||||
|
||||
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
|
||||
@@ -9,21 +9,20 @@ import sys
|
||||
import time
|
||||
import logging
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict
|
||||
from typing import Dict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TemplateVMManager:
|
||||
"""Manages template-based VM deployment on Unraid."""
|
||||
|
||||
|
||||
def __init__(self, unraid_host: str, unraid_user: str = "root"):
|
||||
self.unraid_host = unraid_host
|
||||
self.unraid_user = unraid_user
|
||||
self.template_vm_name = "thrillwiki-template-ubuntu"
|
||||
self.template_path = f"/mnt/user/domains/{self.template_vm_name}"
|
||||
|
||||
|
||||
def authenticate(self) -> bool:
|
||||
"""Test SSH connectivity to Unraid server."""
|
||||
try:
|
||||
@@ -32,9 +31,9 @@ class TemplateVMManager:
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=15
|
||||
timeout=15,
|
||||
)
|
||||
|
||||
|
||||
if result.returncode == 0 and "Connected" in result.stdout:
|
||||
logger.info("Successfully connected to Unraid via SSH")
|
||||
return True
|
||||
@@ -44,7 +43,7 @@ class TemplateVMManager:
|
||||
except Exception as e:
|
||||
logger.error(f"SSH authentication error: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def check_template_exists(self) -> bool:
|
||||
"""Check if template VM disk exists."""
|
||||
try:
|
||||
@@ -55,26 +54,35 @@ class TemplateVMManager:
|
||||
text=True,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
logger.info(f"Template VM disk found at {self.template_path}/vdisk1.qcow2")
|
||||
logger.info(
|
||||
f"Template VM disk found at {
|
||||
self.template_path}/vdisk1.qcow2"
|
||||
)
|
||||
return True
|
||||
else:
|
||||
logger.error(f"Template VM disk not found at {self.template_path}/vdisk1.qcow2")
|
||||
logger.error(
|
||||
f"Template VM disk not found at {
|
||||
self.template_path}/vdisk1.qcow2"
|
||||
)
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking template existence: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def get_template_info(self) -> Dict[str, str]:
|
||||
"""Get information about the template VM."""
|
||||
try:
|
||||
# Get disk size
|
||||
size_result = subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'qemu-img info {self.template_path}/vdisk1.qcow2 | grep \"virtual size\"'",
|
||||
f"ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} 'qemu-img info {
|
||||
self.template_path}/vdisk1.qcow2 | grep \"virtual size\"'",
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
# Get file size
|
||||
file_size_result = subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'ls -lh {self.template_path}/vdisk1.qcow2'",
|
||||
@@ -82,7 +90,7 @@ class TemplateVMManager:
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
# Get last modification time
|
||||
mod_time_result = subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'stat -c \"%y\" {self.template_path}/vdisk1.qcow2'",
|
||||
@@ -90,59 +98,74 @@ class TemplateVMManager:
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
info = {
|
||||
"template_path": f"{self.template_path}/vdisk1.qcow2",
|
||||
"virtual_size": size_result.stdout.strip() if size_result.returncode == 0 else "Unknown",
|
||||
"file_size": file_size_result.stdout.split()[4] if file_size_result.returncode == 0 else "Unknown",
|
||||
"last_modified": mod_time_result.stdout.strip() if mod_time_result.returncode == 0 else "Unknown"
|
||||
"template_path": f"{
|
||||
self.template_path}/vdisk1.qcow2",
|
||||
"virtual_size": (
|
||||
size_result.stdout.strip()
|
||||
if size_result.returncode == 0
|
||||
else "Unknown"
|
||||
),
|
||||
"file_size": (
|
||||
file_size_result.stdout.split()[4]
|
||||
if file_size_result.returncode == 0
|
||||
else "Unknown"
|
||||
),
|
||||
"last_modified": (
|
||||
mod_time_result.stdout.strip()
|
||||
if mod_time_result.returncode == 0
|
||||
else "Unknown"
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
return info
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting template info: {e}")
|
||||
return {}
|
||||
|
||||
|
||||
def copy_template_disk(self, target_vm_name: str) -> bool:
|
||||
"""Copy template VM disk to a new VM instance."""
|
||||
try:
|
||||
if not self.check_template_exists():
|
||||
logger.error("Template VM disk not found. Cannot proceed with copy.")
|
||||
return False
|
||||
|
||||
|
||||
target_path = f"/mnt/user/domains/{target_vm_name}"
|
||||
target_disk = f"{target_path}/vdisk1.qcow2"
|
||||
|
||||
|
||||
logger.info(f"Copying template disk to new VM: {target_vm_name}")
|
||||
|
||||
|
||||
# Create target directory
|
||||
subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'mkdir -p {target_path}'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
# Check if target disk already exists
|
||||
disk_check = subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'test -f {target_disk}'",
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
)
|
||||
|
||||
|
||||
if disk_check.returncode == 0:
|
||||
logger.warning(f"Target disk already exists: {target_disk}")
|
||||
logger.info("Removing existing disk to replace with fresh template copy...")
|
||||
logger.info(
|
||||
"Removing existing disk to replace with fresh template copy..."
|
||||
)
|
||||
subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'rm -f {target_disk}'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
# Copy template disk with rsync progress display
|
||||
logger.info("🚀 Copying template disk with rsync progress display...")
|
||||
start_time = time.time()
|
||||
|
||||
|
||||
# First, get the size of the template disk for progress calculation
|
||||
size_result = subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'stat -c%s {self.template_path}/vdisk1.qcow2'",
|
||||
@@ -150,25 +173,31 @@ class TemplateVMManager:
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
template_size = "unknown size"
|
||||
if size_result.returncode == 0:
|
||||
size_bytes = int(size_result.stdout.strip())
|
||||
if size_bytes > 1024*1024*1024: # GB
|
||||
template_size = f"{size_bytes/(1024*1024*1024):.1f}GB"
|
||||
elif size_bytes > 1024*1024: # MB
|
||||
template_size = f"{size_bytes/(1024*1024):.1f}MB"
|
||||
if size_bytes > 1024 * 1024 * 1024: # GB
|
||||
template_size = f"{size_bytes /
|
||||
(1024 *
|
||||
1024 *
|
||||
1024):.1f}GB"
|
||||
elif size_bytes > 1024 * 1024: # MB
|
||||
template_size = f"{size_bytes / (1024 * 1024):.1f}MB"
|
||||
else:
|
||||
template_size = f"{size_bytes/1024:.1f}KB"
|
||||
|
||||
template_size = f"{size_bytes / 1024:.1f}KB"
|
||||
|
||||
logger.info(f"📊 Template disk size: {template_size}")
|
||||
|
||||
|
||||
# Use rsync with progress display
|
||||
logger.info("📈 Using rsync for real-time progress display...")
|
||||
|
||||
|
||||
# Force rsync to output progress to stderr and capture it
|
||||
copy_cmd = f"ssh {self.unraid_user}@{self.unraid_host} 'rsync -av --progress --stats {self.template_path}/vdisk1.qcow2 {target_disk}'"
|
||||
|
||||
copy_cmd = f"ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} 'rsync -av --progress --stats {
|
||||
self.template_path}/vdisk1.qcow2 {target_disk}'"
|
||||
|
||||
# Run with real-time output, unbuffered
|
||||
process = subprocess.Popen(
|
||||
copy_cmd,
|
||||
@@ -177,12 +206,11 @@ class TemplateVMManager:
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
bufsize=0, # Unbuffered
|
||||
universal_newlines=True
|
||||
universal_newlines=True,
|
||||
)
|
||||
|
||||
|
||||
import select
|
||||
import sys
|
||||
|
||||
|
||||
# Read both stdout and stderr for progress with real-time display
|
||||
while True:
|
||||
# Check if process is still running
|
||||
@@ -194,16 +222,18 @@ class TemplateVMManager:
|
||||
print(f"📊 {remaining_out.strip()}", flush=True)
|
||||
logger.info(f"📊 {remaining_out.strip()}")
|
||||
if remaining_err:
|
||||
for line in remaining_err.strip().split('\n'):
|
||||
for line in remaining_err.strip().split("\n"):
|
||||
if line.strip():
|
||||
print(f"⚡ {line.strip()}", flush=True)
|
||||
logger.info(f"⚡ {line.strip()}")
|
||||
break
|
||||
|
||||
|
||||
# Use select to check for available data
|
||||
try:
|
||||
ready, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1)
|
||||
|
||||
ready, _, _ = select.select(
|
||||
[process.stdout, process.stderr], [], [], 0.1
|
||||
)
|
||||
|
||||
for stream in ready:
|
||||
line = stream.readline()
|
||||
if line:
|
||||
@@ -214,24 +244,36 @@ class TemplateVMManager:
|
||||
logger.info(f"📊 {line}")
|
||||
else: # stderr
|
||||
# rsync progress goes to stderr
|
||||
if any(keyword in line for keyword in ['%', 'bytes/sec', 'to-check=', 'xfr#']):
|
||||
if any(
|
||||
keyword in line
|
||||
for keyword in [
|
||||
"%",
|
||||
"bytes/sec",
|
||||
"to-check=",
|
||||
"xfr#",
|
||||
]
|
||||
):
|
||||
print(f"⚡ {line}", flush=True)
|
||||
logger.info(f"⚡ {line}")
|
||||
else:
|
||||
print(f"📋 {line}", flush=True)
|
||||
logger.info(f"📋 {line}")
|
||||
except select.error:
|
||||
# Fallback for systems without select (like some Windows environments)
|
||||
print("⚠️ select() not available, using fallback method...", flush=True)
|
||||
# Fallback for systems without select (like some Windows
|
||||
# environments)
|
||||
print(
|
||||
"⚠️ select() not available, using fallback method...",
|
||||
flush=True,
|
||||
)
|
||||
logger.info("⚠️ select() not available, using fallback method...")
|
||||
|
||||
|
||||
# Simple fallback - just wait and read what's available
|
||||
time.sleep(0.5)
|
||||
try:
|
||||
# Try to read non-blocking
|
||||
import fcntl
|
||||
import os
|
||||
|
||||
|
||||
# Make stdout/stderr non-blocking
|
||||
fd_out = process.stdout.fileno()
|
||||
fd_err = process.stderr.fileno()
|
||||
@@ -239,41 +281,55 @@ class TemplateVMManager:
|
||||
fl_err = fcntl.fcntl(fd_err, fcntl.F_GETFL)
|
||||
fcntl.fcntl(fd_out, fcntl.F_SETFL, fl_out | os.O_NONBLOCK)
|
||||
fcntl.fcntl(fd_err, fcntl.F_SETFL, fl_err | os.O_NONBLOCK)
|
||||
|
||||
|
||||
try:
|
||||
out_line = process.stdout.readline()
|
||||
if out_line:
|
||||
print(f"📊 {out_line.strip()}", flush=True)
|
||||
logger.info(f"📊 {out_line.strip()}")
|
||||
except:
|
||||
except BaseException:
|
||||
pass
|
||||
|
||||
|
||||
try:
err_line = process.stderr.readline()
if err_line:
if any(keyword in err_line for keyword in ["%", "bytes/sec", "to-check=", "xfr#"]):
print(f"⚡ {err_line.strip()}", flush=True)
logger.info(f"⚡ {err_line.strip()}")
else:
print(f"📋 {err_line.strip()}", flush=True)
logger.info(f"📋 {err_line.strip()}")
except BaseException:
pass
except ImportError:
# If fcntl not available, just continue
print("📊 Progress display limited - continuing copy...", flush=True)
logger.info("📊 Progress display limited - continuing copy...")
break
copy_result_code = process.wait()

end_time = time.time()
copy_time = end_time - start_time

if copy_result_code == 0:
logger.info(f"✅ Template disk copied successfully in {copy_time:.1f} seconds")
logger.info(f"🎯 New VM disk created: {target_disk}")

# Verify the copy by checking file size
verify_result = subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'ls -lh {target_disk}'",
@@ -281,43 +337,46 @@ class TemplateVMManager:
capture_output=True,
text=True,
)

if verify_result.returncode == 0:
file_info = verify_result.stdout.strip().split()
if len(file_info) >= 5:
copied_size = file_info[4]
logger.info(f"📋 Copied disk size: {copied_size}")

return True
else:
logger.error(f"❌ Failed to copy template disk (exit code: {copy_result_code})")
logger.error("Check Unraid server disk space and permissions")
return False

except Exception as e:
logger.error(f"Error copying template disk: {e}")
return False
def prepare_vm_from_template(
self, target_vm_name: str, vm_memory: int, vm_vcpus: int, vm_ip: str
) -> bool:
"""Complete template-based VM preparation."""
try:
logger.info(f"Preparing VM '{target_vm_name}' from template...")

# Step 1: Copy template disk
if not self.copy_template_disk(target_vm_name):
return False

logger.info(f"VM '{target_vm_name}' prepared successfully from template")
logger.info("The VM disk is ready with Ubuntu pre-installed")
logger.info("You can now create the VM configuration and start it")

return True

except Exception as e:
logger.error(f"Error preparing VM from template: {e}")
return False
def update_template(self) -> bool:
"""Update the template VM with latest changes."""
try:
@@ -328,7 +387,7 @@ class TemplateVMManager:
logger.info("3. Updating ThrillWiki dependencies")
logger.info("4. Stopping the template VM")
logger.info("5. The disk will automatically be the new template")

# Check template VM status
template_status = subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh domstate {self.template_vm_name}'",
@@ -336,18 +395,23 @@ class TemplateVMManager:
capture_output=True,
text=True,
)

if template_status.returncode == 0:
status = template_status.stdout.strip()
logger.info(f"Template VM '{self.template_vm_name}' status: {status}")

if status == "running":
logger.warning("Template VM is currently running!")
logger.warning("Stop the template VM when updates are complete")
logger.warning("Running VMs should not be used as templates")
return False
elif status in ["shut off", "shutoff"]:
logger.info("Template VM is properly stopped and ready to use as template")
return True
else:
logger.warning(f"Template VM in unexpected state: {status}")
@@ -355,11 +419,11 @@ class TemplateVMManager:
else:
logger.error("Could not check template VM status")
return False

except Exception as e:
logger.error(f"Error updating template: {e}")
return False
def list_template_instances(self) -> list:
"""List all VMs that were created from the template."""
try:
@@ -370,18 +434,18 @@ class TemplateVMManager:
capture_output=True,
text=True,
)

if result.returncode != 0:
logger.error("Failed to list VMs")
return []

all_vms = result.stdout.strip().split("\n")

# Filter for thrillwiki VMs (excluding template)
template_instances = []
for vm in all_vms:
vm = vm.strip()
if vm and "thrillwiki" in vm.lower() and vm != self.template_vm_name:
# Get VM status
status_result = subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh domstate {vm}'",
@@ -389,11 +453,15 @@ class TemplateVMManager:
capture_output=True,
text=True,
)
status = status_result.stdout.strip() if status_result.returncode == 0 else "unknown"
template_instances.append({"name": vm, "status": status})

return template_instances

except Exception as e:
logger.error(f"Error listing template instances: {e}")
return []
@@ -402,7 +470,7 @@ class TemplateVMManager:
def main():
"""Main entry point for template manager."""
import argparse

parser = argparse.ArgumentParser(
description="ThrillWiki Template VM Manager",
epilog="""
@@ -412,39 +480,35 @@ Examples:
python template_manager.py list     # List template instances
python template_manager.py update   # Update template VM
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)

parser.add_argument(
"action",
choices=["info", "copy", "list", "update", "check"],
help="Action to perform",
)

parser.add_argument("vm_name", nargs="?", help="VM name (required for copy action)")

args = parser.parse_args()

# Get Unraid connection details from environment
unraid_host = os.environ.get("UNRAID_HOST")
unraid_user = os.environ.get("UNRAID_USER", "root")

if not unraid_host:
logger.error("UNRAID_HOST environment variable is required")
sys.exit(1)

# Create template manager
template_manager = TemplateVMManager(unraid_host, unraid_user)

# Authenticate
if not template_manager.authenticate():
logger.error("Failed to connect to Unraid server")
sys.exit(1)
if args.action == "info":
|
||||
logger.info("📋 Template VM Information")
|
||||
info = template_manager.get_template_info()
|
||||
@@ -456,7 +520,7 @@ Examples:
|
||||
else:
|
||||
print("❌ Failed to get template information")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
elif args.action == "check":
|
||||
if template_manager.check_template_exists():
|
||||
logger.info("✅ Template VM disk exists and is ready to use")
|
||||
@@ -464,25 +528,33 @@ Examples:
|
||||
else:
|
||||
logger.error("❌ Template VM disk not found")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
elif args.action == "copy":
|
||||
if not args.vm_name:
|
||||
logger.error("VM name is required for copy action")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
success = template_manager.copy_template_disk(args.vm_name)
|
||||
sys.exit(0 if success else 1)
|
||||
|
||||
|
||||
elif args.action == "list":
|
||||
logger.info("📋 Template-based VM Instances")
|
||||
instances = template_manager.list_template_instances()
|
||||
if instances:
|
||||
for instance in instances:
|
||||
status_emoji = "🟢" if instance["status"] == "running" else "🔴" if instance["status"] == "shut off" else "🟡"
|
||||
print(f"{status_emoji} {instance['name']} ({instance['status']})")
|
||||
status_emoji = (
|
||||
"🟢"
|
||||
if instance["status"] == "running"
|
||||
else "🔴" if instance["status"] == "shut off" else "🟡"
|
||||
)
|
||||
print(
|
||||
f"{status_emoji} {
|
||||
instance['name']} ({
|
||||
instance['status']})"
|
||||
)
|
||||
else:
|
||||
print("No template instances found")
|
||||
|
||||
|
||||
elif args.action == "update":
|
||||
success = template_manager.update_template()
|
||||
sys.exit(0 if success else 1)
|
||||
@@ -495,5 +567,5 @@ if __name__ == "__main__":
|
||||
format="%(asctime)s - %(levelname)s - %(message)s",
|
||||
handlers=[logging.StreamHandler()],
|
||||
)
|
||||
|
||||
|
||||
main()
|
||||
|
||||
@@ -5,7 +5,6 @@ This script provides basic validation to check if our autoinstall config
complies with the official schema structure.
"""

import json
import yaml
import sys
from pathlib import Path
@@ -13,15 +12,15 @@ from pathlib import Path

def load_autoinstall_config(template_path: str) -> dict:
"""Load the autoinstall configuration from the template file."""
with open(template_path, "r") as f:
content = f.read()

# Parse the cloud-config YAML
config = yaml.safe_load(content)

# Extract the autoinstall section
if "autoinstall" in config:
return config["autoinstall"]
else:
raise ValueError("No autoinstall section found in cloud-config")
@@ -29,154 +28,158 @@ def load_autoinstall_config(template_path: str) -> dict:
def validate_required_fields(config: dict) -> list:
"""Validate required fields according to schema."""
errors = []

# Check version field (required)
if "version" not in config:
errors.append("Missing required field: version")
elif not isinstance(config["version"], int) or config["version"] != 1:
errors.append("Invalid version: must be integer 1")

return errors


def validate_identity_section(config: dict) -> list:
"""Validate identity section."""
errors = []

if "identity" in config:
identity = config["identity"]
required_fields = ["username", "hostname", "password"]

for field in required_fields:
if field not in identity:
errors.append(f"Identity section missing required field: {field}")

# Additional validation
if "username" in identity and not isinstance(identity["username"], str):
errors.append("Identity username must be a string")

if "hostname" in identity and not isinstance(identity["hostname"], str):
errors.append("Identity hostname must be a string")

return errors
def validate_network_section(config: dict) -> list:
"""Validate network section."""
errors = []

if "network" in config:
network = config["network"]

if "version" not in network:
errors.append("Network section missing required field: version")
elif network["version"] != 2:
errors.append("Network version must be 2")

return errors


def validate_keyboard_section(config: dict) -> list:
"""Validate keyboard section."""
errors = []

if "keyboard" in config:
keyboard = config["keyboard"]

if "layout" not in keyboard:
errors.append("Keyboard section missing required field: layout")

return errors
def validate_ssh_section(config: dict) -> list:
"""Validate SSH section."""
errors = []

if "ssh" in config:
ssh = config["ssh"]

if "install-server" in ssh and not isinstance(ssh["install-server"], bool):
errors.append("SSH install-server must be boolean")

if "authorized-keys" in ssh and not isinstance(ssh["authorized-keys"], list):
errors.append("SSH authorized-keys must be an array")

if "allow-pw" in ssh and not isinstance(ssh["allow-pw"], bool):
errors.append("SSH allow-pw must be boolean")

return errors


def validate_packages_section(config: dict) -> list:
"""Validate packages section."""
errors = []

if "packages" in config:
packages = config["packages"]

if not isinstance(packages, list):
errors.append("Packages must be an array")
else:
for i, package in enumerate(packages):
if not isinstance(package, str):
errors.append(f"Package at index {i} must be a string")

return errors
def validate_commands_sections(config: dict) -> list:
"""Validate early-commands and late-commands sections."""
errors = []

for section_name in ["early-commands", "late-commands"]:
if section_name in config:
commands = config[section_name]

if not isinstance(commands, list):
errors.append(f"{section_name} must be an array")
else:
for i, command in enumerate(commands):
if not isinstance(command, (str, list)):
errors.append(f"{section_name} item at index {i} must be string or array")
elif isinstance(command, list):
for j, cmd_part in enumerate(command):
if not isinstance(cmd_part, str):
errors.append(f"{section_name}[{i}][{j}] must be a string")

return errors


def validate_shutdown_section(config: dict) -> list:
"""Validate shutdown section."""
errors = []

if "shutdown" in config:
shutdown = config["shutdown"]
valid_values = ["reboot", "poweroff"]

if shutdown not in valid_values:
errors.append(f"Shutdown must be one of: {valid_values}")

return errors
def main():
"""Main validation function."""
template_path = Path(__file__).parent / "cloud-init-template.yaml"

if not template_path.exists():
print(f"Error: Template file not found at {template_path}")
sys.exit(1)

try:
# Load the autoinstall configuration
print(f"Loading autoinstall config from {template_path}")
config = load_autoinstall_config(str(template_path))

# Run validation checks
all_errors = []

all_errors.extend(validate_required_fields(config))
all_errors.extend(validate_identity_section(config))
all_errors.extend(validate_network_section(config))
@@ -185,7 +188,7 @@ def main():
all_errors.extend(validate_packages_section(config))
all_errors.extend(validate_commands_sections(config))
all_errors.extend(validate_shutdown_section(config))

# Report results
if all_errors:
print("\n❌ Validation failed with the following errors:")
@@ -195,11 +198,11 @@ def main():
else:
print("\n✅ Autoinstall configuration validation passed!")
print("Configuration appears to comply with Ubuntu autoinstall schema.")

# Print summary of detected sections
sections = list(config.keys())
print(f"\nDetected sections: {', '.join(sorted(sections))}")

except Exception as e:
print(f"Error during validation: {e}")
sys.exit(1)
@@ -11,44 +11,46 @@ import os
import sys
import time
import logging
import tempfile
import subprocess
import shutil
from pathlib import Path
from typing import Optional

# Import our modular components
from iso_builder import UbuntuISOBuilder
# Note: UnraidVMManager is defined locally in this file

# Configuration
UNRAID_HOST = os.environ.get("UNRAID_HOST", "localhost")
UNRAID_USER = os.environ.get("UNRAID_USER", "root")
VM_NAME = os.environ.get("VM_NAME", "thrillwiki-vm")
VM_MEMORY = int(os.environ.get("VM_MEMORY", 4096))  # MB
VM_VCPUS = int(os.environ.get("VM_VCPUS", 2))
VM_DISK_SIZE = int(os.environ.get("VM_DISK_SIZE", 50))  # GB
SSH_PUBLIC_KEY = os.environ.get("SSH_PUBLIC_KEY", "")

# Network Configuration
VM_IP = os.environ.get("VM_IP", "dhcp")
VM_GATEWAY = os.environ.get("VM_GATEWAY", "192.168.20.1")
VM_NETMASK = os.environ.get("VM_NETMASK", "255.255.255.0")
VM_NETWORK = os.environ.get("VM_NETWORK", "192.168.20.0/24")

# GitHub Configuration
REPO_URL = os.environ.get("REPO_URL", "")
GITHUB_USERNAME = os.environ.get("GITHUB_USERNAME", "")
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN", "")

# Ubuntu version preference
UBUNTU_VERSION = os.environ.get("UBUNTU_VERSION", "24.04")

# Setup logging
os.makedirs("logs", exist_ok=True)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
handlers=[logging.FileHandler("logs/unraid-vm.log"), logging.StreamHandler()],
)
logger = logging.getLogger(__name__)
@@ -67,9 +69,9 @@ class UnraidVMManager:
shell=True,
capture_output=True,
text=True,
timeout=15,
)

if result.returncode == 0 and "Connected" in result.stdout:
logger.info("Successfully connected to Unraid via SSH")
return True
@@ -114,24 +116,25 @@ class UnraidVMManager:
import uuid

vm_uuid = existing_uuid if existing_uuid else str(uuid.uuid4())

# Detect Ubuntu ISO dynamically
ubuntu_iso_path = self._detect_ubuntu_iso()
if not ubuntu_iso_path:
raise FileNotFoundError("No Ubuntu ISO found for VM template")

# Read XML template from file
template_path = Path(__file__).parent / "thrillwiki-vm-template.xml"
if not template_path.exists():
raise FileNotFoundError(f"VM XML template not found at {template_path}")

with open(template_path, "r", encoding="utf-8") as f:
xml_template = f.read()

# Calculate CPU topology
cpu_cores = VM_VCPUS // 2 if VM_VCPUS > 1 else 1
cpu_threads = 2 if VM_VCPUS > 1 else 1

mac_suffix = self._generate_mac_suffix()

# Replace placeholders with actual values
xml_content = xml_template.format(
VM_NAME=VM_NAME,
@@ -140,10 +143,10 @@ class UnraidVMManager:
VM_VCPUS=VM_VCPUS,
CPU_CORES=cpu_cores,
CPU_THREADS=cpu_threads,
MAC_SUFFIX=mac_suffix,
UBUNTU_ISO_PATH=ubuntu_iso_path,
)

return xml_content.strip()
def _detect_ubuntu_iso(self) -> Optional[str]:
@@ -156,48 +159,52 @@ class UnraidVMManager:
capture_output=True,
text=True,
)

if find_all_result.returncode != 0 or not find_all_result.stdout.strip():
return None

available_isos = find_all_result.stdout.strip().split("\n")

# Prioritize ISOs by version and type
# Sort by preference: 24.04 LTS > 22.04 LTS > 23.x > 20.04 > others
# Within each version, prefer the latest point release
priority_versions = [
"24.04",  # Ubuntu 24.04 LTS (highest priority)
"22.04",  # Ubuntu 22.04 LTS
"23.10",  # Ubuntu 23.10
"23.04",  # Ubuntu 23.04
"20.04",  # Ubuntu 20.04 LTS
]

# Find the best ISO based on priority, preferring latest point releases
for version in priority_versions:
# Find all ISOs for this version
version_isos = []
for iso in available_isos:
if version in iso and ("server" in iso.lower() or "live" in iso.lower()):
version_isos.append(iso)

if version_isos:
# Sort by version number (reverse to get latest first)
# This will put 24.04.3 before 24.04.2 before 24.04.1 before 24.04
version_isos.sort(reverse=True)
return version_isos[0]

# If no priority match, use the first server/live ISO found
for iso in available_isos:
if "server" in iso.lower() or "live" in iso.lower():
return iso

# If still no match, use the first Ubuntu ISO found (any type)
if available_isos:
return available_isos[0]

return None

except Exception as e:
logger.error(f"Error detecting Ubuntu ISO: {e}")
return None
@@ -212,7 +219,7 @@ class UnraidVMManager:
|
||||
# Always try to stop VM before updating (force stop)
|
||||
current_status = self.vm_status()
|
||||
logger.info(f"Current VM status: {current_status}")
|
||||
|
||||
|
||||
if current_status not in ["shut off", "unknown"]:
|
||||
logger.info(f"Stopping VM {VM_NAME} for configuration update...")
|
||||
self.stop_vm()
|
||||
@@ -230,7 +237,8 @@ class UnraidVMManager:
|
||||
check=True,
|
||||
)
|
||||
|
||||
# Create virtual disk if it doesn't exist (for both new and updated VMs)
|
||||
# Create virtual disk if it doesn't exist (for both new and updated
|
||||
# VMs)
|
||||
disk_check = subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'test -f {self.vm_config_path}/vdisk1.qcow2'",
|
||||
shell=True,
|
||||
@@ -247,7 +255,8 @@ class UnraidVMManager:
|
||||
logger.info(f"Virtual disk already exists for VM {VM_NAME}")
|
||||
|
||||
# Always create/recreate cloud-init ISO for automated installation and ThrillWiki deployment
|
||||
# This ensures the latest configuration is used whether creating or updating the VM
|
||||
# This ensures the latest configuration is used whether creating or
|
||||
# updating the VM
|
||||
logger.info(
|
||||
"Creating cloud-init ISO for automated Ubuntu and ThrillWiki setup..."
|
||||
)
|
||||
@@ -257,9 +266,7 @@ class UnraidVMManager:
|
||||
|
||||
# For Ubuntu 24.04, use UEFI boot instead of kernel extraction
|
||||
# Ubuntu 24.04 has issues with direct kernel boot autoinstall
|
||||
logger.info(
|
||||
"Using UEFI boot for Ubuntu 24.04 compatibility..."
|
||||
)
|
||||
logger.info("Using UEFI boot for Ubuntu 24.04 compatibility...")
|
||||
if not self.fallback_to_uefi_boot():
|
||||
logger.error("UEFI boot setup failed")
|
||||
return False
|
||||
@@ -286,9 +293,9 @@ class UnraidVMManager:
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
is_persistent = VM_NAME in persistent_check.stdout
|
||||
|
||||
|
||||
if is_persistent:
|
||||
# Undefine persistent VM with NVRAM flag
|
||||
logger.info(
|
||||
@@ -299,7 +306,9 @@ class UnraidVMManager:
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
logger.info(f"Persistent VM {VM_NAME} undefined for reconfiguration")
|
||||
logger.info(
|
||||
f"Persistent VM {VM_NAME} undefined for reconfiguration"
|
||||
)
|
||||
else:
|
||||
# Handle transient VM - just destroy it
|
||||
logger.info(
|
||||
@@ -318,7 +327,7 @@ class UnraidVMManager:
|
||||
vm_xml = self.create_vm_xml(existing_uuid)
|
||||
xml_file = f"/tmp/{VM_NAME}.xml"
|
||||
|
||||
with open(xml_file, "w", encoding='utf-8') as f:
|
||||
with open(xml_file, "w", encoding="utf-8") as f:
|
||||
f.write(vm_xml)
|
||||
|
||||
# Copy XML to Unraid and define/redefine VM
|
||||
@@ -359,7 +368,7 @@ class UnraidVMManager:
|
||||
try:
|
||||
# Check available Ubuntu ISOs and select the correct one
|
||||
iso_mount_point = "/tmp/ubuntu-iso"
|
||||
|
||||
|
||||
logger.info("Checking for available Ubuntu ISOs...")
|
||||
# List available Ubuntu ISOs with detailed information
|
||||
result = subprocess.run(
|
||||
@@ -368,9 +377,9 @@ class UnraidVMManager:
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
logger.info(f"Available ISOs: {result.stdout}")
|
||||
|
||||
|
||||
# First, try to find ANY existing Ubuntu ISOs dynamically
|
||||
# This will find all Ubuntu ISOs regardless of naming convention
|
||||
find_all_result = subprocess.run(
|
||||
@@ -379,82 +388,107 @@ class UnraidVMManager:
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
ubuntu_iso_path = None
|
||||
available_isos = []
|
||||
|
||||
|
||||
if find_all_result.returncode == 0 and find_all_result.stdout.strip():
|
||||
available_isos = find_all_result.stdout.strip().split('\n')
|
||||
logger.info(f"Found {len(available_isos)} Ubuntu ISOs: {available_isos}")
|
||||
|
||||
available_isos = find_all_result.stdout.strip().split("\n")
|
||||
logger.info(
|
||||
f"Found {
|
||||
len(available_isos)} Ubuntu ISOs: {available_isos}"
|
||||
)
|
||||
|
||||
# Prioritize ISOs by version and type (prefer LTS, prefer newer versions)
|
||||
# Sort by preference: 24.04 LTS > 22.04 LTS > 23.x > 20.04 > others
|
||||
# Within each version, prefer the latest point release
|
||||
priority_versions = [
|
||||
'24.04', # Ubuntu 24.04 LTS (highest priority)
|
||||
'22.04', # Ubuntu 22.04 LTS
|
||||
'23.10', # Ubuntu 23.10
|
||||
'23.04', # Ubuntu 23.04
|
||||
'20.04', # Ubuntu 20.04 LTS
|
||||
"24.04", # Ubuntu 24.04 LTS (highest priority)
|
||||
"22.04", # Ubuntu 22.04 LTS
|
||||
"23.10", # Ubuntu 23.10
|
||||
"23.04", # Ubuntu 23.04
|
||||
"20.04", # Ubuntu 20.04 LTS
|
||||
]
|
||||
|
||||
# Find the best ISO based on priority, preferring latest point releases
|
||||
|
||||
# Find the best ISO based on priority, preferring latest point
|
||||
# releases
|
||||
for version in priority_versions:
|
||||
# Find all ISOs for this version
|
||||
version_isos = []
|
||||
for iso in available_isos:
|
||||
if version in iso and ('server' in iso.lower() or 'live' in iso.lower()):
|
||||
if version in iso and (
|
||||
"server" in iso.lower() or "live" in iso.lower()
|
||||
):
|
||||
version_isos.append(iso)
|
||||
|
||||
|
||||
if version_isos:
|
||||
# Sort by version number (reverse to get latest first)
|
||||
# This will put 24.04.3 before 24.04.2 before 24.04.1 before 24.04
|
||||
# This will put 24.04.3 before 24.04.2 before 24.04.1
|
||||
# before 24.04
|
||||
version_isos.sort(reverse=True)
|
||||
ubuntu_iso_path = version_isos[0]
|
||||
logger.info(f"Selected latest Ubuntu {version} ISO: {ubuntu_iso_path}")
|
||||
logger.info(
|
||||
f"Selected latest Ubuntu {version} ISO: {ubuntu_iso_path}"
|
||||
)
|
||||
break
|
||||
|
||||
|
||||
# If no priority match, use the first server/live ISO found
|
||||
if not ubuntu_iso_path:
|
||||
for iso in available_isos:
|
||||
if 'server' in iso.lower() or 'live' in iso.lower():
|
||||
if "server" in iso.lower() or "live" in iso.lower():
|
||||
ubuntu_iso_path = iso
|
||||
logger.info(f"Selected Ubuntu server/live ISO: {ubuntu_iso_path}")
|
||||
logger.info(
|
||||
f"Selected Ubuntu server/live ISO: {ubuntu_iso_path}"
|
||||
)
|
||||
break
|
||||
|
||||
|
||||
# If still no match, use the first Ubuntu ISO found (any type)
if not ubuntu_iso_path and available_isos:
ubuntu_iso_path = available_isos[0]
logger.info(f"Selected first available Ubuntu ISO: {ubuntu_iso_path}")
logger.warning("Using non-server Ubuntu ISO - this may not support autoinstall")
|
||||
|
||||
if not ubuntu_iso_path:
logger.error("No Ubuntu server ISO found in /mnt/user/isos/")
logger.error("")
logger.error("🔥 MISSING UBUNTU ISO - ACTION REQUIRED 🔥")
logger.error("")
logger.error("Please download Ubuntu LTS Server ISO to your Unraid server:")
logger.error("")
logger.error("📦 RECOMMENDED: Ubuntu 24.04 LTS (Noble Numbat) - Latest LTS:")
logger.error("  1. Go to: https://releases.ubuntu.com/24.04/")
logger.error("  2. Download: ubuntu-24.04-live-server-amd64.iso")
logger.error("  3. Upload to: /mnt/user/isos/ on your Unraid server")
logger.error("")
logger.error("📦 ALTERNATIVE: Ubuntu 22.04 LTS (Jammy Jellyfish) - Stable:")
logger.error("  1. Go to: https://releases.ubuntu.com/22.04/")
logger.error("  2. Download: ubuntu-22.04-live-server-amd64.iso")
logger.error("  3. Upload to: /mnt/user/isos/ on your Unraid server")
logger.error("")
logger.error("💡 Quick download via wget on Unraid server:")
logger.error("  # For Ubuntu 24.04 LTS (recommended):")
logger.error("  wget -P /mnt/user/isos/ https://releases.ubuntu.com/24.04/ubuntu-24.04-live-server-amd64.iso")
logger.error("  # For Ubuntu 22.04 LTS (stable):")
logger.error("  wget -P /mnt/user/isos/ https://releases.ubuntu.com/22.04/ubuntu-22.04-live-server-amd64.iso")
logger.error("")
logger.error("Then re-run this script.")
logger.error("")
return False
|
||||
|
||||
|
||||
# Verify ISO file integrity
|
||||
logger.info(f"Verifying ISO file: {ubuntu_iso_path}")
|
||||
stat_result = subprocess.run(
|
||||
@@ -466,23 +500,23 @@ class UnraidVMManager:
|
||||
if stat_result.returncode != 0:
|
||||
logger.error(f"Cannot access ISO file: {ubuntu_iso_path}")
|
||||
return False
|
||||
|
||||
|
||||
logger.info(f"ISO file stats: {stat_result.stdout.strip()}")
|
||||
|
||||
|
||||
# Clean up any previous mount points
|
||||
subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'umount {iso_mount_point} 2>/dev/null || true'",
|
||||
shell=True,
|
||||
check=False,
|
||||
)
|
||||
|
||||
|
||||
# Remove mount point if it exists
|
||||
subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rmdir {iso_mount_point} 2>/dev/null || true'",
|
||||
shell=True,
|
||||
check=False,
|
||||
)
|
||||
|
||||
|
||||
# Create mount point
|
||||
logger.info(f"Creating mount point: {iso_mount_point}")
|
||||
subprocess.run(
|
||||
@@ -490,7 +524,7 @@ class UnraidVMManager:
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
# Check if loop module is loaded
|
||||
logger.info("Checking loop module availability...")
|
||||
loop_check = subprocess.run(
|
||||
@@ -500,7 +534,7 @@ class UnraidVMManager:
|
||||
text=True,
|
||||
)
|
||||
logger.info(f"Loop module check: {loop_check.stdout}")
|
||||
|
||||
|
||||
# Mount ISO with more verbose output
|
||||
logger.info(f"Mounting ISO: {ubuntu_iso_path} to {iso_mount_point}")
|
||||
mount_result = subprocess.run(
|
||||
@@ -509,15 +543,18 @@ class UnraidVMManager:
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
if mount_result.returncode != 0:
|
||||
logger.error(f"Failed to mount ISO. Return code: {mount_result.returncode}")
|
||||
logger.error(
|
||||
f"Failed to mount ISO. Return code: {
|
||||
mount_result.returncode}"
|
||||
)
|
||||
logger.error(f"STDOUT: {mount_result.stdout}")
|
||||
logger.error(f"STDERR: {mount_result.stderr}")
|
||||
return False
|
||||
|
||||
|
||||
logger.info("ISO mounted successfully")
|
||||
|
||||
|
||||
# Create directory for extracted kernel files
|
||||
kernel_dir = f"/mnt/user/domains/{VM_NAME}/kernel"
|
||||
subprocess.run(
|
||||
@@ -525,37 +562,37 @@ class UnraidVMManager:
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
# Extract kernel and initrd
|
||||
subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'cp {iso_mount_point}/casper/vmlinuz {kernel_dir}/'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'cp {iso_mount_point}/casper/initrd {kernel_dir}/'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
# Unmount ISO
|
||||
subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'umount {iso_mount_point}'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
# Remove mount point
|
||||
subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rmdir {iso_mount_point}'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
logger.info("Ubuntu kernel and initrd extracted successfully")
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to extract Ubuntu kernel: {e}")
|
||||
# Clean up on failure
|
||||
@@ -570,19 +607,23 @@ class UnraidVMManager:
|
||||
"""Fallback to UEFI boot when kernel extraction fails."""
|
||||
try:
|
||||
logger.info("Setting up fallback UEFI boot configuration...")
|
||||
|
||||
|
||||
# First, detect available Ubuntu ISO for the fallback template
|
||||
ubuntu_iso_path = self._detect_ubuntu_iso()
|
||||
if not ubuntu_iso_path:
|
||||
logger.error("Cannot create UEFI fallback without Ubuntu ISO")
|
||||
return False
|
||||
|
||||
|
||||
# Create a fallback VM XML template path
|
||||
fallback_template_path = Path(__file__).parent / "thrillwiki-vm-uefi-fallback-template.xml"
|
||||
|
||||
fallback_template_path = (
|
||||
Path(__file__).parent / "thrillwiki-vm-uefi-fallback-template.xml"
|
||||
)
|
||||
|
||||
# Create fallback UEFI template with detected Ubuntu ISO
|
||||
logger.info(f"Creating fallback UEFI template with detected ISO: {ubuntu_iso_path}")
|
||||
uefi_template = f'''<?xml version='1.0' encoding='UTF-8'?>
|
||||
logger.info(
|
||||
f"Creating fallback UEFI template with detected ISO: {ubuntu_iso_path}"
|
||||
)
|
||||
uefi_template = f"""<?xml version='1.0' encoding='UTF-8'?>
|
||||
<domain type='kvm'>
|
||||
<name>{{VM_NAME}}</name>
|
||||
<uuid>{{VM_UUID}}</uuid>
|
||||
@@ -605,7 +646,7 @@ class UnraidVMManager:
|
||||
<vmport state='off'/>
|
||||
</features>
|
||||
<cpu mode='host-passthrough' check='none' migratable='on'>
|
||||
<topology sockets='1' dies='1' clusters='1' cores='{CPU_CORES}' threads='{CPU_THREADS}'/>
|
||||
<topology sockets='1' dies='1' clusters='1' cores='{{CPU_CORES}}' threads='{{CPU_THREADS}}'/>
|
||||
<cache mode='passthrough'/>
|
||||
<feature policy='require' name='topoext'/>
|
||||
</cpu>
|
||||
@@ -682,7 +723,7 @@ class UnraidVMManager:
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/>
|
||||
</controller>
|
||||
<interface type='bridge'>
|
||||
<mac address='52:54:00:{MAC_SUFFIX}'/>
|
||||
<mac address='52:54:00:{{MAC_SUFFIX}}'/>
|
||||
<source bridge='br0.20'/>
|
||||
<model type='virtio'/>
|
||||
<address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
|
||||
@@ -717,28 +758,32 @@ class UnraidVMManager:
|
||||
<address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x0'/>
|
||||
</memballoon>
|
||||
</devices>
|
||||
</domain>'''
|
||||
|
||||
with open(fallback_template_path, 'w', encoding='utf-8') as f:
|
||||
</domain>"""
|
||||
|
||||
with open(fallback_template_path, "w", encoding="utf-8") as f:
|
||||
f.write(uefi_template)
|
||||
|
||||
|
||||
logger.info(f"Created fallback UEFI template: {fallback_template_path}")
|
||||
|
||||
|
||||
# Update the template path to use the fallback
|
||||
original_template = Path(__file__).parent / "thrillwiki-vm-template.xml"
|
||||
fallback_template = Path(__file__).parent / "thrillwiki-vm-uefi-fallback-template.xml"
|
||||
|
||||
fallback_template = (
|
||||
Path(__file__).parent / "thrillwiki-vm-uefi-fallback-template.xml"
|
||||
)
|
||||
|
||||
# Backup original template and replace with fallback
|
||||
if original_template.exists():
|
||||
backup_path = Path(__file__).parent / "thrillwiki-vm-template.xml.backup"
|
||||
backup_path = (
|
||||
Path(__file__).parent / "thrillwiki-vm-template.xml.backup"
|
||||
)
|
||||
original_template.rename(backup_path)
|
||||
logger.info(f"Backed up original template to {backup_path}")
|
||||
|
||||
|
||||
fallback_template.rename(original_template)
|
||||
logger.info("Switched to UEFI fallback template")
|
||||
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to set up UEFI fallback: {e}")
|
||||
return False
|
||||
@@ -841,7 +886,7 @@ class UnraidVMManager:
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10 # 10 second timeout for the command itself
|
||||
timeout=10, # 10 second timeout for the command itself
|
||||
)
|
||||
|
||||
if result.returncode == 0:
|
||||
@@ -853,25 +898,33 @@ class UnraidVMManager:
|
||||
logger.info(f"VM {VM_NAME} stopped gracefully")
|
||||
return True
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
# If still running after 30 seconds, force destroy
|
||||
logger.warning(f"VM {VM_NAME} didn't shutdown gracefully, forcing destroy...")
|
||||
logger.warning(
|
||||
f"VM {VM_NAME} didn't shutdown gracefully, forcing destroy..."
|
||||
)
|
||||
destroy_result = subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'virsh destroy {VM_NAME}'",
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
|
||||
if destroy_result.returncode == 0:
|
||||
logger.info(f"VM {VM_NAME} forcefully destroyed")
|
||||
return True
|
||||
else:
|
||||
logger.error(f"Failed to destroy VM: {destroy_result.stderr}")
|
||||
logger.error(
|
||||
f"Failed to destroy VM: {
|
||||
destroy_result.stderr}"
|
||||
)
|
||||
return False
|
||||
else:
|
||||
logger.error(f"Failed to initiate VM shutdown: {result.stderr}")
|
||||
logger.error(
|
||||
f"Failed to initiate VM shutdown: {
|
||||
result.stderr}"
|
||||
)
|
||||
return False
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
@@ -898,7 +951,9 @@ class UnraidVMManager:
|
||||
lines = result.stdout.strip().split("\n")
|
||||
for line in lines:
|
||||
if "ipv4" in line:
|
||||
# Extract IP from line like: vnet0 52:54:00:xx:xx:xx ipv4 192.168.1.100/24
|
||||
# Extract IP from line like: vnet0
|
||||
# 52:54:00:xx:xx:xx ipv4
|
||||
# 192.168.1.100/24
|
||||
parts = line.split()
|
||||
if len(parts) >= 4:
|
||||
ip_with_mask = parts[3]
|
||||
@@ -907,7 +962,8 @@ class UnraidVMManager:
|
||||
return ip
|
||||
|
||||
logger.info(
|
||||
f"Waiting for VM IP... (attempt {attempt + 1}/{max_attempts}) - Ubuntu autoinstall in progress"
|
||||
f"Waiting for VM IP... (attempt {
|
||||
attempt + 1}/{max_attempts}) - Ubuntu autoinstall in progress"
|
||||
)
|
||||
time.sleep(10)
|
||||
|
||||
@@ -928,27 +984,31 @@ class UnraidVMManager:
|
||||
ssh_public_key = os.getenv("SSH_PUBLIC_KEY", "")
|
||||
|
||||
# Read autoinstall user-data template
|
||||
autoinstall_template_path = Path(__file__).parent / "autoinstall-user-data.yaml"
|
||||
autoinstall_template_path = (
|
||||
Path(__file__).parent / "autoinstall-user-data.yaml"
|
||||
)
|
||||
if not autoinstall_template_path.exists():
|
||||
logger.error(f"Autoinstall template not found at {autoinstall_template_path}")
|
||||
logger.error(
|
||||
f"Autoinstall template not found at {autoinstall_template_path}"
|
||||
)
|
||||
return False
|
||||
|
||||
with open(autoinstall_template_path, 'r', encoding='utf-8') as f:
|
||||
|
||||
with open(autoinstall_template_path, "r", encoding="utf-8") as f:
|
||||
autoinstall_template = f.read()
|
||||
|
||||
# Replace placeholders in autoinstall template
|
||||
user_data = autoinstall_template.format(
|
||||
SSH_PUBLIC_KEY=ssh_public_key if ssh_public_key else "# No SSH key provided",
|
||||
GITHUB_REPO=repo_url if repo_url else ""
|
||||
SSH_PUBLIC_KEY=(
|
||||
ssh_public_key if ssh_public_key else "# No SSH key provided"
|
||||
),
|
||||
GITHUB_REPO=repo_url if repo_url else "",
|
||||
)
|
||||
|
||||
# Update network configuration in autoinstall based on VM_IP setting
if vm_ip.lower() == "dhcp":
# Template already defaults to DHCP (dhcp4: true), so leave it unchanged
user_data = user_data.replace("dhcp4: true", "dhcp4: true")
|
||||
else:
|
||||
# Update with static IP configuration
|
||||
gateway = os.getenv("VM_GATEWAY", "192.168.20.1")
|
||||
@@ -960,10 +1020,7 @@ class UnraidVMManager:
|
||||
addresses:
|
||||
- 8.8.8.8
|
||||
- 8.8.4.4"""
|
||||
user_data = user_data.replace(
|
||||
"dhcp4: true",
|
||||
network_config
|
||||
)
|
||||
user_data = user_data.replace("dhcp4: true", network_config)
|
||||
|
||||
# Force clean temp directory for cloud-init files
|
||||
cloud_init_dir = "/tmp/cloud-init"
|
||||
@@ -975,19 +1032,21 @@ class UnraidVMManager:
|
||||
server_dir = f"{cloud_init_dir}/server"
|
||||
os.makedirs(server_dir, exist_ok=True)
|
||||
|
||||
# Create user-data file in server/ directory with autoinstall configuration
|
||||
with open(f"{server_dir}/user-data", "w", encoding='utf-8') as f:
|
||||
# Create user-data file in server/ directory with autoinstall
|
||||
# configuration
|
||||
with open(f"{server_dir}/user-data", "w", encoding="utf-8") as f:
|
||||
f.write(user_data)
|
||||
|
||||
# Create empty meta-data file in server/ directory as per Ubuntu guide
|
||||
with open(f"{server_dir}/meta-data", "w", encoding='utf-8') as f:
|
||||
# Create empty meta-data file in server/ directory as per Ubuntu
|
||||
# guide
|
||||
with open(f"{server_dir}/meta-data", "w", encoding="utf-8") as f:
|
||||
f.write("")
|
||||
|
||||
# Create root level meta-data for cloud-init
|
||||
meta_data = f"""instance-id: thrillwiki-vm-{int(time.time())}
|
||||
local-hostname: thrillwiki-vm
|
||||
"""
|
||||
with open(f"{cloud_init_dir}/meta-data", "w", encoding='utf-8') as f:
|
||||
with open(f"{cloud_init_dir}/meta-data", "w", encoding="utf-8") as f:
|
||||
f.write(meta_data)
|
||||
|
||||
# Create user-data at root level (minimal cloud-config)
|
||||
@@ -995,7 +1054,7 @@ local-hostname: thrillwiki-vm
|
||||
# Root level cloud-config for compatibility
|
||||
# Main autoinstall config is in /server/user-data
|
||||
"""
|
||||
with open(f"{cloud_init_dir}/user-data", "w", encoding='utf-8') as f:
|
||||
with open(f"{cloud_init_dir}/user-data", "w", encoding="utf-8") as f:
|
||||
f.write(root_user_data)
|
||||
|
||||
# Force remove old ISO first
|
||||
@@ -1078,15 +1137,19 @@ local-hostname: thrillwiki-vm
|
||||
shell=True,
|
||||
check=False, # Don't fail if file doesn't exist
|
||||
)
|
||||
logger.info(f"Removed old cloud-init ISO from Unraid: /mnt/user/isos/{VM_NAME}-cloud-init.iso")
|
||||
|
||||
logger.info(
|
||||
f"Removed old cloud-init ISO from Unraid: /mnt/user/isos/{VM_NAME}-cloud-init.iso"
|
||||
)
|
||||
|
||||
# Copy new ISO to Unraid
|
||||
subprocess.run(
|
||||
f"scp {iso_path} {UNRAID_USER}@{UNRAID_HOST}:/mnt/user/isos/",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
logger.info(f"Copied new cloud-init ISO to Unraid: /mnt/user/isos/{VM_NAME}-cloud-init.iso")
|
||||
logger.info(
|
||||
f"Copied new cloud-init ISO to Unraid: /mnt/user/isos/{VM_NAME}-cloud-init.iso"
|
||||
)
|
||||
|
||||
logger.info("Cloud-init ISO created successfully")
|
||||
return True
|
||||
@@ -1154,7 +1217,7 @@ local-hostname: thrillwiki-vm
|
||||
shell=True,
|
||||
check=False, # Don't fail if file doesn't exist
|
||||
)
|
||||
|
||||
|
||||
# Remove extracted kernel files
|
||||
subprocess.run(
|
||||
f"ssh {UNRAID_USER}@{UNRAID_HOST} 'rm -rf /mnt/user/domains/{VM_NAME}/kernel'",
|
||||
|
||||
@@ -5,7 +5,6 @@ Handles VM creation, configuration, and lifecycle management.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import subprocess
|
||||
@@ -33,9 +32,9 @@ class UnraidVMManager:
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=15
|
||||
timeout=15,
|
||||
)
|
||||
|
||||
|
||||
if result.returncode == 0 and "Connected" in result.stdout:
|
||||
logger.info("Successfully connected to Unraid via SSH")
|
||||
return True
|
||||
@@ -75,23 +74,28 @@ class UnraidVMManager:
|
||||
hash_bytes = hash_obj.digest()[:3]
|
||||
return ":".join([f"{b:02x}" for b in hash_bytes])
|
||||
|
||||
def create_vm_xml(self, vm_memory: int, vm_vcpus: int, vm_ip: str,
|
||||
existing_uuid: str = None) -> str:
|
||||
def create_vm_xml(
|
||||
self,
|
||||
vm_memory: int,
|
||||
vm_vcpus: int,
|
||||
vm_ip: str,
|
||||
existing_uuid: str = None,
|
||||
) -> str:
|
||||
"""Generate VM XML configuration from template file."""
|
||||
vm_uuid = existing_uuid if existing_uuid else str(uuid.uuid4())
|
||||
|
||||
|
||||
# Read XML template from file
|
||||
template_path = Path(__file__).parent / "thrillwiki-vm-template.xml"
|
||||
if not template_path.exists():
|
||||
raise FileNotFoundError(f"VM XML template not found at {template_path}")
|
||||
|
||||
with open(template_path, 'r', encoding='utf-8') as f:
|
||||
|
||||
with open(template_path, "r", encoding="utf-8") as f:
|
||||
xml_template = f.read()
|
||||
|
||||
|
||||
# Calculate CPU topology
|
||||
cpu_cores = vm_vcpus // 2 if vm_vcpus > 1 else 1
|
||||
cpu_threads = 2 if vm_vcpus > 1 else 1
|
||||
|
||||
|
||||
# Replace placeholders with actual values
|
||||
xml_content = xml_template.format(
|
||||
VM_NAME=self.vm_name,
|
||||
@@ -100,17 +104,18 @@ class UnraidVMManager:
|
||||
VM_VCPUS=vm_vcpus,
|
||||
CPU_CORES=cpu_cores,
|
||||
CPU_THREADS=cpu_threads,
|
||||
MAC_SUFFIX=self._generate_mac_suffix(vm_ip)
|
||||
MAC_SUFFIX=self._generate_mac_suffix(vm_ip),
|
||||
)
|
||||
|
||||
|
||||
return xml_content.strip()
|
||||
|
||||
def upload_iso_to_unraid(self, local_iso_path: Path) -> str:
|
||||
"""Upload ISO to Unraid server."""
|
||||
remote_iso_path = f"/mnt/user/isos/{self.vm_name}-ubuntu-autoinstall.iso"
|
||||
|
||||
remote_iso_path = f"/mnt/user/isos/{
|
||||
self.vm_name}-ubuntu-autoinstall.iso"
|
||||
|
||||
logger.info(f"Uploading ISO to Unraid: {remote_iso_path}")
|
||||
|
||||
|
||||
try:
|
||||
# Remove old ISO if exists
|
||||
subprocess.run(
|
||||
@@ -118,34 +123,42 @@ class UnraidVMManager:
|
||||
shell=True,
|
||||
check=False, # Don't fail if file doesn't exist
|
||||
)
|
||||
|
||||
|
||||
# Upload new ISO
|
||||
subprocess.run(
|
||||
f"scp {local_iso_path} {self.unraid_user}@{self.unraid_host}:{remote_iso_path}",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
|
||||
|
||||
logger.info(f"ISO uploaded successfully: {remote_iso_path}")
|
||||
return remote_iso_path
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to upload ISO: {e}")
|
||||
raise
|
||||
|
||||
def create_vm(self, vm_memory: int, vm_vcpus: int, vm_disk_size: int, vm_ip: str) -> bool:
|
||||
def create_vm(
|
||||
self, vm_memory: int, vm_vcpus: int, vm_disk_size: int, vm_ip: str
|
||||
) -> bool:
|
||||
"""Create or update the VM on Unraid."""
|
||||
try:
|
||||
vm_exists = self.check_vm_exists()
|
||||
|
||||
if vm_exists:
|
||||
logger.info(f"VM {self.vm_name} already exists, updating configuration...")
|
||||
logger.info(
|
||||
f"VM {
|
||||
self.vm_name} already exists, updating configuration..."
|
||||
)
|
||||
# Always try to stop VM before updating
|
||||
current_status = self.vm_status()
|
||||
logger.info(f"Current VM status: {current_status}")
|
||||
|
||||
|
||||
if current_status not in ["shut off", "unknown"]:
|
||||
logger.info(f"Stopping VM {self.vm_name} for configuration update...")
|
||||
logger.info(
|
||||
f"Stopping VM {
|
||||
self.vm_name} for configuration update..."
|
||||
)
|
||||
self.stop_vm()
|
||||
time.sleep(3)
|
||||
else:
|
||||
@@ -174,13 +187,19 @@ class UnraidVMManager:
|
||||
"""
|
||||
subprocess.run(disk_cmd, shell=True, check=True)
|
||||
else:
|
||||
logger.info(f"Virtual disk already exists for VM {self.vm_name}")
|
||||
logger.info(
|
||||
f"Virtual disk already exists for VM {
|
||||
self.vm_name}"
|
||||
)
|
||||
|
||||
existing_uuid = None
|
||||
|
||||
if vm_exists:
|
||||
# Get existing VM UUID
|
||||
cmd = f"ssh {self.unraid_user}@{self.unraid_host} 'virsh dumpxml {self.vm_name} | grep \"<uuid>\" | sed \"s/<uuid>//g\" | sed \"s/<\\/uuid>//g\" | tr -d \" \"'"
|
||||
cmd = f'ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} \'virsh dumpxml {
|
||||
self.vm_name} | grep "<uuid>" | sed "s/<uuid>//g" | sed "s/<\\/uuid>//g" | tr -d " "\''
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
shell=True,
|
||||
@@ -199,34 +218,49 @@ class UnraidVMManager:
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
|
||||
is_persistent = self.vm_name in persistent_check.stdout
|
||||
|
||||
|
||||
if is_persistent:
|
||||
# Undefine persistent VM with NVRAM flag
|
||||
logger.info(f"VM {self.vm_name} is persistent, undefining with NVRAM for reconfiguration...")
|
||||
logger.info(
|
||||
f"VM {
|
||||
self.vm_name} is persistent, undefining with NVRAM for reconfiguration..."
|
||||
)
|
||||
subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh undefine {self.vm_name} --nvram'",
|
||||
f"ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} 'virsh undefine {
|
||||
self.vm_name} --nvram'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
logger.info(f"Persistent VM {self.vm_name} undefined for reconfiguration")
|
||||
logger.info(
|
||||
f"Persistent VM {
|
||||
self.vm_name} undefined for reconfiguration"
|
||||
)
|
||||
else:
|
||||
# Handle transient VM - just destroy it
|
||||
logger.info(f"VM {self.vm_name} is transient, destroying for reconfiguration...")
|
||||
logger.info(
|
||||
f"VM {
|
||||
self.vm_name} is transient, destroying for reconfiguration..."
|
||||
)
|
||||
if self.vm_status() == "running":
|
||||
subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh destroy {self.vm_name}'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
logger.info(f"Transient VM {self.vm_name} destroyed for reconfiguration")
|
||||
logger.info(
|
||||
f"Transient VM {
|
||||
self.vm_name} destroyed for reconfiguration"
|
||||
)
|
||||
|
||||
# Generate VM XML with appropriate UUID
|
||||
vm_xml = self.create_vm_xml(vm_memory, vm_vcpus, vm_ip, existing_uuid)
|
||||
xml_file = f"/tmp/{self.vm_name}.xml"
|
||||
|
||||
with open(xml_file, "w", encoding='utf-8') as f:
|
||||
with open(xml_file, "w", encoding="utf-8") as f:
|
||||
f.write(vm_xml)
|
||||
|
||||
# Copy XML to Unraid and define/redefine VM
|
||||
@@ -245,7 +279,10 @@ class UnraidVMManager:
|
||||
|
||||
# Ensure VM is set to autostart for persistent configuration
|
||||
subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh autostart {self.vm_name}'",
|
||||
f"ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} 'virsh autostart {
|
||||
self.vm_name}'",
|
||||
shell=True,
|
||||
check=False, # Don't fail if autostart is already enabled
|
||||
)
|
||||
@@ -281,7 +318,9 @@ class UnraidVMManager:
|
||||
# Copy template to create NVRAM file
|
||||
logger.info(f"Creating NVRAM file: {nvram_path}")
|
||||
result = subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'cp /usr/share/qemu/ovmf-x64/OVMF_VARS-pure-efi.fd {nvram_path}'",
|
||||
f"ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} 'cp /usr/share/qemu/ovmf-x64/OVMF_VARS-pure-efi.fd {nvram_path}'",
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
@@ -316,7 +355,10 @@ class UnraidVMManager:
|
||||
return False
|
||||
|
||||
# Get VM UUID from XML
|
||||
cmd = f"ssh {self.unraid_user}@{self.unraid_host} 'virsh dumpxml {self.vm_name} | grep \"<uuid>\" | sed \"s/<uuid>//g\" | sed \"s/<\\/uuid>//g\" | tr -d \" \"'"
|
||||
cmd = f'ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} \'virsh dumpxml {
|
||||
self.vm_name} | grep "<uuid>" | sed "s/<uuid>//g" | sed "s/<\\/uuid>//g" | tr -d " "\''
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
shell=True,
|
||||
@@ -361,37 +403,49 @@ class UnraidVMManager:
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if result.returncode == 0:
|
||||
# Wait up to 30 seconds for graceful shutdown
|
||||
logger.info(f"Waiting for VM {self.vm_name} to shutdown gracefully...")
|
||||
logger.info(
|
||||
f"Waiting for VM {
|
||||
self.vm_name} to shutdown gracefully..."
|
||||
)
|
||||
for i in range(30):
|
||||
status = self.vm_status()
|
||||
if status in ["shut off", "unknown"]:
|
||||
logger.info(f"VM {self.vm_name} stopped gracefully")
|
||||
return True
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
# If still running after 30 seconds, force destroy
|
||||
logger.warning(f"VM {self.vm_name} didn't shutdown gracefully, forcing destroy...")
|
||||
logger.warning(
|
||||
f"VM {
|
||||
self.vm_name} didn't shutdown gracefully, forcing destroy..."
|
||||
)
|
||||
destroy_result = subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh destroy {self.vm_name}'",
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
|
||||
if destroy_result.returncode == 0:
|
||||
logger.info(f"VM {self.vm_name} forcefully destroyed")
|
||||
return True
|
||||
else:
|
||||
logger.error(f"Failed to destroy VM: {destroy_result.stderr}")
|
||||
logger.error(
|
||||
f"Failed to destroy VM: {
|
||||
destroy_result.stderr}"
|
||||
)
|
||||
return False
|
||||
else:
|
||||
logger.error(f"Failed to initiate VM shutdown: {result.stderr}")
|
||||
logger.error(
|
||||
f"Failed to initiate VM shutdown: {
|
||||
result.stderr}"
|
||||
)
|
||||
return False
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
@@ -418,7 +472,9 @@ class UnraidVMManager:
|
||||
lines = result.stdout.strip().split("\\n")
|
||||
for line in lines:
|
||||
if "ipv4" in line:
|
||||
# Extract IP from line like: vnet0 52:54:00:xx:xx:xx ipv4 192.168.1.100/24
|
||||
# Extract IP from line like: vnet0
|
||||
# 52:54:00:xx:xx:xx ipv4
|
||||
# 192.168.1.100/24
|
||||
parts = line.split()
|
||||
if len(parts) >= 4:
|
||||
ip_with_mask = parts[3]
|
||||
@@ -427,7 +483,8 @@ class UnraidVMManager:
|
||||
return ip
|
||||
|
||||
logger.info(
|
||||
f"Waiting for VM IP... (attempt {attempt + 1}/{max_attempts}) - Ubuntu autoinstall in progress"
|
||||
f"Waiting for VM IP... (attempt {
|
||||
attempt + 1}/{max_attempts}) - Ubuntu autoinstall in progress"
|
||||
)
|
||||
time.sleep(10)
|
||||
|
||||
@@ -460,7 +517,10 @@ class UnraidVMManager:
|
||||
def delete_vm(self) -> bool:
|
||||
"""Completely remove VM and all associated files."""
|
||||
try:
|
||||
logger.info(f"Deleting VM {self.vm_name} and all associated files...")
|
||||
logger.info(
|
||||
f"Deleting VM {
|
||||
self.vm_name} and all associated files..."
|
||||
)
|
||||
|
||||
# Check if VM exists
|
||||
if not self.check_vm_exists():
|
||||
@@ -476,7 +536,10 @@ class UnraidVMManager:
|
||||
# Undefine VM with NVRAM
|
||||
logger.info(f"Undefining VM {self.vm_name}...")
|
||||
subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh undefine {self.vm_name} --nvram'",
|
||||
f"ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} 'virsh undefine {
|
||||
self.vm_name} --nvram'",
|
||||
shell=True,
|
||||
check=True,
|
||||
)
|
||||
@@ -491,7 +554,10 @@ class UnraidVMManager:
|
||||
|
||||
# Remove autoinstall ISO
|
||||
subprocess.run(
|
||||
f"ssh {self.unraid_user}@{self.unraid_host} 'rm -f /mnt/user/isos/{self.vm_name}-ubuntu-autoinstall.iso'",
|
||||
f"ssh {
|
||||
self.unraid_user}@{
|
||||
self.unraid_host} 'rm -f /mnt/user/isos/{
|
||||
self.vm_name}-ubuntu-autoinstall.iso'",
|
||||
shell=True,
|
||||
check=False, # Don't fail if file doesn't exist
|
||||
)
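The reconfiguration path above preserves the existing domain UUID by scraping `virsh dumpxml` output with grep and sed over SSH. A rough alternative sketch of the same lookup, assuming passwordless SSH to the Unraid host and using `virsh domuuid` (which prints the UUID directly), could look like the helper below; the function name is illustrative and does not appear in the repository:

from typing import Optional
import subprocess

def get_existing_vm_uuid(unraid_user: str, unraid_host: str, vm_name: str) -> Optional[str]:
    """Return the UUID of a defined libvirt domain, or None if it is not defined."""
    # 'virsh domuuid' prints only the UUID, so no grep/sed post-processing is needed.
    result = subprocess.run(
        ["ssh", f"{unraid_user}@{unraid_host}", f"virsh domuuid {vm_name}"],
        capture_output=True,
        text=True,
        timeout=10,
    )
    uuid_str = result.stdout.strip()
    return uuid_str if result.returncode == 0 and uuid_str else None

Passing the UUID back into the generated XML keeps NVRAM and other per-domain state associated with the same machine identity after the undefine/define cycle.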

@@ -5,7 +5,6 @@ Handles VM creation using pre-built template disks instead of autoinstall.
"""

import os
import sys
import time
import logging
import subprocess
@@ -60,23 +59,28 @@ class UnraidTemplateVMManager:
hash_bytes = hash_obj.digest()[:3]
return ":".join([f"{b:02x}" for b in hash_bytes])

def create_vm_xml(self, vm_memory: int, vm_vcpus: int, vm_ip: str,
existing_uuid: str = None) -> str:
def create_vm_xml(
self,
vm_memory: int,
vm_vcpus: int,
vm_ip: str,
existing_uuid: str = None,
) -> str:
"""Generate VM XML configuration from template file."""
vm_uuid = existing_uuid if existing_uuid else str(uuid.uuid4())

# Use simplified template for template-based VMs
template_path = Path(__file__).parent / "thrillwiki-vm-template-simple.xml"
if not template_path.exists():
raise FileNotFoundError(f"VM XML template not found at {template_path}")

with open(template_path, 'r', encoding='utf-8') as f:

with open(template_path, "r", encoding="utf-8") as f:
xml_template = f.read()

# Calculate CPU topology
cpu_cores = vm_vcpus // 2 if vm_vcpus > 1 else 1
cpu_threads = 2 if vm_vcpus > 1 else 1

# Replace placeholders with actual values
xml_content = xml_template.format(
VM_NAME=self.vm_name,
@@ -85,25 +89,32 @@ class UnraidTemplateVMManager:
VM_VCPUS=vm_vcpus,
CPU_CORES=cpu_cores,
CPU_THREADS=cpu_threads,
MAC_SUFFIX=self._generate_mac_suffix(vm_ip)
MAC_SUFFIX=self._generate_mac_suffix(vm_ip),
)

return xml_content.strip()

def create_vm_from_template(self, vm_memory: int, vm_vcpus: int,
vm_disk_size: int, vm_ip: str) -> bool:
def create_vm_from_template(
self, vm_memory: int, vm_vcpus: int, vm_disk_size: int, vm_ip: str
) -> bool:
"""Create VM from template disk."""
try:
vm_exists = self.check_vm_exists()

if vm_exists:
logger.info(f"VM {self.vm_name} already exists, updating configuration...")
logger.info(
f"VM {
self.vm_name} already exists, updating configuration..."
)
# Always try to stop VM before updating
current_status = self.vm_status()
logger.info(f"Current VM status: {current_status}")

if current_status not in ["shut off", "unknown"]:
logger.info(f"Stopping VM {self.vm_name} for configuration update...")
logger.info(
f"Stopping VM {
self.vm_name} for configuration update..."
)
self.stop_vm()
time.sleep(3)
else:
@@ -123,7 +134,10 @@ class UnraidTemplateVMManager:

if vm_exists:
# Get existing VM UUID
cmd = f"ssh {self.unraid_user}@{self.unraid_host} 'virsh dumpxml {self.vm_name} | grep \"<uuid>\" | sed \"s/<uuid>//g\" | sed \"s/<\\/uuid>//g\" | tr -d \" \"'"
cmd = f'ssh {
self.unraid_user}@{
self.unraid_host} \'virsh dumpxml {
self.vm_name} | grep "<uuid>" | sed "s/<uuid>//g" | sed "s/<\\/uuid>//g" | tr -d " "\''
result = subprocess.run(
cmd,
shell=True,
@@ -142,34 +156,49 @@ class UnraidTemplateVMManager:
capture_output=True,
text=True,
)

is_persistent = self.vm_name in persistent_check.stdout

if is_persistent:
# Undefine persistent VM with NVRAM flag
logger.info(f"VM {self.vm_name} is persistent, undefining with NVRAM for reconfiguration...")
logger.info(
f"VM {
self.vm_name} is persistent, undefining with NVRAM for reconfiguration..."
)
subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh undefine {self.vm_name} --nvram'",
f"ssh {
self.unraid_user}@{
self.unraid_host} 'virsh undefine {
self.vm_name} --nvram'",
shell=True,
check=True,
)
logger.info(f"Persistent VM {self.vm_name} undefined for reconfiguration")
logger.info(
f"Persistent VM {
self.vm_name} undefined for reconfiguration"
)
else:
# Handle transient VM - just destroy it
logger.info(f"VM {self.vm_name} is transient, destroying for reconfiguration...")
logger.info(
f"VM {
self.vm_name} is transient, destroying for reconfiguration..."
)
if self.vm_status() == "running":
subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh destroy {self.vm_name}'",
shell=True,
check=True,
)
logger.info(f"Transient VM {self.vm_name} destroyed for reconfiguration")
logger.info(
f"Transient VM {
self.vm_name} destroyed for reconfiguration"
)

# Step 2: Generate VM XML with appropriate UUID
vm_xml = self.create_vm_xml(vm_memory, vm_vcpus, vm_ip, existing_uuid)
xml_file = f"/tmp/{self.vm_name}.xml"

with open(xml_file, "w", encoding='utf-8') as f:
with open(xml_file, "w", encoding="utf-8") as f:
f.write(vm_xml)

# Step 3: Copy XML to Unraid and define VM
@@ -188,13 +217,19 @@ class UnraidTemplateVMManager:

# Ensure VM is set to autostart for persistent configuration
subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh autostart {self.vm_name}'",
f"ssh {
self.unraid_user}@{
self.unraid_host} 'virsh autostart {
self.vm_name}'",
shell=True,
check=False,  # Don't fail if autostart is already enabled
)

action = "updated" if vm_exists else "created"
logger.info(f"VM {self.vm_name} {action} successfully from template")
logger.info(
f"VM {
self.vm_name} {action} successfully from template"
)

# Cleanup
os.remove(xml_file)
@@ -224,7 +259,9 @@ class UnraidTemplateVMManager:
# Copy template to create NVRAM file
logger.info(f"Creating NVRAM file: {nvram_path}")
result = subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'cp /usr/share/qemu/ovmf-x64/OVMF_VARS-pure-efi.fd {nvram_path}'",
f"ssh {
self.unraid_user}@{
self.unraid_host} 'cp /usr/share/qemu/ovmf-x64/OVMF_VARS-pure-efi.fd {nvram_path}'",
shell=True,
capture_output=True,
text=True,
@@ -259,7 +296,10 @@ class UnraidTemplateVMManager:
return False

# Get VM UUID from XML
cmd = f"ssh {self.unraid_user}@{self.unraid_host} 'virsh dumpxml {self.vm_name} | grep \"<uuid>\" | sed \"s/<uuid>//g\" | sed \"s/<\\/uuid>//g\" | tr -d \" \"'"
cmd = f'ssh {
self.unraid_user}@{
self.unraid_host} \'virsh dumpxml {
self.vm_name} | grep "<uuid>" | sed "s/<uuid>//g" | sed "s/<\\/uuid>//g" | tr -d " "\''
result = subprocess.run(
cmd,
shell=True,
@@ -284,7 +324,9 @@ class UnraidTemplateVMManager:

if result.returncode == 0:
logger.info(f"VM {self.vm_name} started successfully")
logger.info("VM is booting from template disk - should be ready quickly!")
logger.info(
"VM is booting from template disk - should be ready quickly!"
)
return True
else:
logger.error(f"Failed to start VM: {result.stderr}")
@@ -305,37 +347,49 @@ class UnraidTemplateVMManager:
shell=True,
capture_output=True,
text=True,
timeout=10
timeout=10,
)

if result.returncode == 0:
# Wait up to 30 seconds for graceful shutdown
logger.info(f"Waiting for VM {self.vm_name} to shutdown gracefully...")
logger.info(
f"Waiting for VM {
self.vm_name} to shutdown gracefully..."
)
for i in range(30):
status = self.vm_status()
if status in ["shut off", "unknown"]:
logger.info(f"VM {self.vm_name} stopped gracefully")
return True
time.sleep(1)

# If still running after 30 seconds, force destroy
logger.warning(f"VM {self.vm_name} didn't shutdown gracefully, forcing destroy...")
logger.warning(
f"VM {
self.vm_name} didn't shutdown gracefully, forcing destroy..."
)
destroy_result = subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh destroy {self.vm_name}'",
shell=True,
capture_output=True,
text=True,
timeout=10
timeout=10,
)

if destroy_result.returncode == 0:
logger.info(f"VM {self.vm_name} forcefully destroyed")
return True
else:
logger.error(f"Failed to destroy VM: {destroy_result.stderr}")
logger.error(
f"Failed to destroy VM: {
destroy_result.stderr}"
)
return False
else:
logger.error(f"Failed to initiate VM shutdown: {result.stderr}")
logger.error(
f"Failed to initiate VM shutdown: {
result.stderr}"
)
return False

except subprocess.TimeoutExpired:
@@ -350,94 +404,121 @@ class UnraidTemplateVMManager:
try:
# Method 1: Try guest agent first (most reliable for template VMs)
logger.info("Trying guest agent for IP detection...")
ssh_cmd = f"ssh -o StrictHostKeyChecking=no {self.unraid_user}@{self.unraid_host} 'virsh guestinfo {self.vm_name} --interface 2>/dev/null || echo FAILED'"
ssh_cmd = f"ssh -o StrictHostKeyChecking=no {
self.unraid_user}@{
self.unraid_host} 'virsh guestinfo {
self.vm_name} --interface 2>/dev/null || echo FAILED'"
logger.info(f"Running SSH command: {ssh_cmd}")
result = subprocess.run(
ssh_cmd,
shell=True,
capture_output=True,
text=True,
timeout=10
ssh_cmd, shell=True, capture_output=True, text=True, timeout=10
)

logger.info(f"Guest agent result (returncode={result.returncode}): {result.stdout[:200]}...")

if result.returncode == 0 and "FAILED" not in result.stdout and "addr" in result.stdout:

logger.info(
f"Guest agent result (returncode={result.returncode}): {result.stdout[:200]}..."
)

if (
result.returncode == 0
and "FAILED" not in result.stdout
and "addr" in result.stdout
):
# Parse guest agent output for IP addresses
lines = result.stdout.strip().split("\n")
import re

for line in lines:
logger.info(f"Processing line: {line}")
# Look for lines like: if.1.addr.0.addr : 192.168.20.65
if ".addr." in line and "addr :" in line and "127.0.0.1" not in line:
if (
".addr." in line
and "addr :" in line
and "127.0.0.1" not in line
):
# Extract IP address from the line
ip_match = re.search(r':\s*([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})\s*$', line)
ip_match = re.search(
r":\s*([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})\s*$",
line,
)
if ip_match:
ip = ip_match.group(1)
logger.info(f"Found potential IP: {ip}")
# Skip localhost and Docker bridge IPs
if not ip.startswith('127.') and not ip.startswith('172.'):
if not ip.startswith("127.") and not ip.startswith("172."):
logger.info(f"Found IP via guest agent: {ip}")
return ip

# Method 2: Try domifaddr (network interface detection)
logger.info("Trying domifaddr for IP detection...")
result = subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh domifaddr {self.vm_name} 2>/dev/null || echo FAILED'",
f"ssh {
self.unraid_user}@{
self.unraid_host} 'virsh domifaddr {
self.vm_name} 2>/dev/null || echo FAILED'",
shell=True,
capture_output=True,
text=True,
timeout=10
timeout=10,
)

if result.returncode == 0 and "FAILED" not in result.stdout and "ipv4" in result.stdout:

if (
result.returncode == 0
and "FAILED" not in result.stdout
and "ipv4" in result.stdout
):
lines = result.stdout.strip().split("\n")
for line in lines:
if "ipv4" in line:
# Extract IP from line like: vnet0 52:54:00:xx:xx:xx ipv4 192.168.1.100/24
# Extract IP from line like: vnet0
# 52:54:00:xx:xx:xx ipv4 192.168.1.100/24
parts = line.split()
if len(parts) >= 4:
ip_with_mask = parts[3]
ip = ip_with_mask.split("/")[0]
logger.info(f"Found IP via domifaddr: {ip}")
return ip

# Method 3: Try ARP table lookup (fallback for when guest agent isn't ready)

# Method 3: Try ARP table lookup (fallback for when guest agent
# isn't ready)
logger.info("Trying ARP table lookup...")
# Get VM MAC address first
mac_result = subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh dumpxml {self.vm_name} | grep \"mac address\" | head -1 | sed \"s/.*address=.\\([^'\"]*\\).*/\\1/\"'",
f'ssh {
self.unraid_user}@{
self.unraid_host} \'virsh dumpxml {
self.vm_name} | grep "mac address" | head -1 | sed "s/.*address=.\\([^\'"]*\\).*/\\1/"\'',
shell=True,
capture_output=True,
text=True,
timeout=10
timeout=10,
)

if mac_result.returncode == 0 and mac_result.stdout.strip():
mac_addr = mac_result.stdout.strip()
logger.info(f"VM MAC address: {mac_addr}")

# Look up IP by MAC in ARP table
arp_result = subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'arp -a | grep {mac_addr} || echo NOTFOUND'",
shell=True,
capture_output=True,
text=True,
timeout=10
timeout=10,
)

if arp_result.returncode == 0 and "NOTFOUND" not in arp_result.stdout:
# Parse ARP output like: (192.168.1.100) at 52:54:00:xx:xx:xx
# Parse ARP output like: (192.168.1.100) at
# 52:54:00:xx:xx:xx
import re
ip_match = re.search(r'\(([0-9.]+)\)', arp_result.stdout)

ip_match = re.search(r"\(([0-9.]+)\)", arp_result.stdout)
if ip_match:
ip = ip_match.group(1)
logger.info(f"Found IP via ARP lookup: {ip}")
return ip

logger.warning("All IP detection methods failed")
return None

except subprocess.TimeoutExpired:
logger.error("Timeout getting VM IP - guest agent may not be ready")
return None
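The lookup above walks through three mechanisms in order: the QEMU guest agent (`virsh guestinfo`), libvirt's interface listing (`virsh domifaddr`), and finally an ARP-table match on the VM's MAC address. A condensed sketch of the first two probes is shown below, assuming the same passwordless SSH access; the `probe_vm_ip` helper is illustrative and not taken from the repository:

import re
import subprocess
from typing import Optional

def probe_vm_ip(unraid_user: str, unraid_host: str, vm_name: str) -> Optional[str]:
    """Try guest agent, then domifaddr, returning the first non-loopback IPv4 found."""
    probes = [
        f"virsh guestinfo {vm_name} --interface",
        f"virsh domifaddr {vm_name}",
    ]
    for probe in probes:
        result = subprocess.run(
            ["ssh", f"{unraid_user}@{unraid_host}", probe],
            capture_output=True, text=True, timeout=10,
        )
        if result.returncode != 0:
            continue
        # Pick the first dotted-quad that is not loopback or a Docker bridge address.
        for ip in re.findall(r"(\d{1,3}(?:\.\d{1,3}){3})", result.stdout):
            if not ip.startswith(("127.", "172.")):
                return ip
    return None  # caller can still fall back to an ARP lookup by MAC address

The guest agent is the most reliable source once qemu-guest-agent is running inside the VM, which is why it is tried first; domifaddr and ARP only cover the window before the agent comes up.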
@@ -467,7 +548,10 @@ class UnraidTemplateVMManager:
def delete_vm(self) -> bool:
"""Completely remove VM and all associated files."""
try:
logger.info(f"Deleting VM {self.vm_name} and all associated files...")
logger.info(
f"Deleting VM {
self.vm_name} and all associated files..."
)

# Check if VM exists
if not self.check_vm_exists():
@@ -483,7 +567,10 @@ class UnraidTemplateVMManager:
# Undefine VM with NVRAM
logger.info(f"Undefining VM {self.vm_name}...")
subprocess.run(
f"ssh {self.unraid_user}@{self.unraid_host} 'virsh undefine {self.vm_name} --nvram'",
f"ssh {
self.unraid_user}@{
self.unraid_host} 'virsh undefine {
self.vm_name} --nvram'",
shell=True,
check=True,
)
@@ -503,11 +590,13 @@ class UnraidTemplateVMManager:
logger.error(f"Failed to delete VM: {e}")
return False

def customize_vm_for_thrillwiki(self, repo_url: str, github_token: str = "") -> bool:
def customize_vm_for_thrillwiki(
self, repo_url: str, github_token: str = ""
) -> bool:
"""Customize the VM for ThrillWiki after it boots."""
try:
logger.info("Waiting for VM to be accessible via SSH...")

# Wait for VM to get an IP and be SSH accessible
vm_ip = None
max_attempts = 20
@@ -524,36 +613,42 @@ class UnraidTemplateVMManager:
if ssh_test.returncode == 0:
logger.info(f"VM is SSH accessible at {vm_ip}")
break

logger.info(f"Waiting for SSH access... (attempt {attempt + 1}/{max_attempts})")

logger.info(
f"Waiting for SSH access... (attempt {
attempt + 1}/{max_attempts})"
)
time.sleep(15)

if not vm_ip:
logger.error("VM failed to become SSH accessible")
return False

# Run ThrillWiki deployment on the VM
logger.info("Running ThrillWiki deployment on VM...")

deploy_cmd = f"cd /home/thrillwiki && /home/thrillwiki/deploy-thrillwiki.sh '{repo_url}'"
if github_token:
deploy_cmd = f"cd /home/thrillwiki && GITHUB_TOKEN='{github_token}' /home/thrillwiki/deploy-thrillwiki.sh '{repo_url}'"

deploy_result = subprocess.run(
f"ssh -o StrictHostKeyChecking=no thrillwiki@{vm_ip} '{deploy_cmd}'",
shell=True,
capture_output=True,
text=True,
)

if deploy_result.returncode == 0:
logger.info("ThrillWiki deployment completed successfully!")
logger.info(f"ThrillWiki should be accessible at http://{vm_ip}:8000")
return True
else:
logger.error(f"ThrillWiki deployment failed: {deploy_result.stderr}")
logger.error(
f"ThrillWiki deployment failed: {
deploy_result.stderr}"
)
return False

except Exception as e:
logger.error(f"Error customizing VM: {e}")
return False
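The customization step boils down to a poll-then-deploy loop: keep retrying SSH against the detected IP, then run the deployment script remotely once a connection succeeds. A minimal sketch of the readiness check follows, with the `thrillwiki` user and the retry budget mirroring the values above; the `wait_for_ssh` helper itself is illustrative and not part of the repository:

import subprocess
import time

def wait_for_ssh(host: str, user: str = "thrillwiki", attempts: int = 20, delay: int = 15) -> bool:
    """Poll until 'ssh user@host true' succeeds or the attempt budget is exhausted."""
    for _ in range(attempts):
        probe = subprocess.run(
            ["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5",
             f"{user}@{host}", "true"],
            capture_output=True,
        )
        if probe.returncode == 0:
            return True
        time.sleep(delay)
    return False

Only after this check passes does it make sense to invoke deploy-thrillwiki.sh over SSH, since the template VM needs time to boot and bring up networking.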