diff --git a/videoarchiver/enhanced_queue.py b/videoarchiver/enhanced_queue.py index 4b257aa..0f2d1f9 100644 --- a/videoarchiver/enhanced_queue.py +++ b/videoarchiver/enhanced_queue.py @@ -183,7 +183,7 @@ class EnhancedVideoQueueManager: channel_id: int, guild_id: int, author_id: int, - callback: Callable[[str, bool, str], Any], + callback: Optional[Callable[[str, bool, str], Any]] = None, # Make callback optional priority: int = 0, ) -> bool: """Add a video to the processing queue with priority support""" @@ -259,7 +259,9 @@ class EnhancedVideoQueueManager: try: # Process the item start_time = time.time() + logger.info(f"Calling processor for item: {item.url}") success, error = await processor(item) + logger.info(f"Processor result for {item.url}: success={success}, error={error}") processing_time = time.time() - start_time # Update metrics diff --git a/videoarchiver/ffmpeg/__init__.py b/videoarchiver/ffmpeg/__init__.py index e7bd9a1..ca79863 100644 --- a/videoarchiver/ffmpeg/__init__.py +++ b/videoarchiver/ffmpeg/__init__.py @@ -1,6 +1,186 @@ -"""FFmpeg management package""" +"""FFmpeg module initialization""" -from videoarchiver.ffmpeg.exceptions import FFmpegError, GPUError, DownloadError -from videoarchiver.ffmpeg.ffmpeg_manager import FFmpegManager +import logging +import sys +import os +from pathlib import Path +from typing import Dict, Any, Optional -__all__ = ['FFmpegManager', 'FFmpegError', 'GPUError', 'DownloadError'] +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + handlers=[ + logging.StreamHandler(sys.stdout) + ] +) + +logger = logging.getLogger("VideoArchiver") + +# Import components after logging is configured +from .ffmpeg_manager import FFmpegManager +from .video_analyzer import VideoAnalyzer +from .gpu_detector import GPUDetector +from .encoder_params import EncoderParams +from .ffmpeg_downloader import FFmpegDownloader +from .exceptions import ( + FFmpegError, + DownloadError, + VerificationError, + EncodingError, + AnalysisError, + GPUError, + HardwareAccelerationError, + FFmpegNotFoundError, + FFprobeError, + CompressionError, + FormatError, + PermissionError, + TimeoutError, + ResourceError, + QualityError, + AudioError, + BitrateError, +) + +class FFmpeg: + """Main FFmpeg interface""" + + _instance = None + + def __new__(cls, base_dir: Optional[Path] = None): + """Singleton pattern to ensure only one FFmpeg instance""" + if cls._instance is None: + cls._instance = super(FFmpeg, cls).__new__(cls) + cls._instance._initialized = False + return cls._instance + + def __init__(self, base_dir: Optional[Path] = None): + """Initialize FFmpeg interface + + Args: + base_dir: Optional base directory for FFmpeg files. If not provided, + will use the default directory in the module. 
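Review note: the `FFmpeg` wrapper introduced here guards against re-initialization with a module-level singleton — `__new__` caches the instance and an `_initialized` flag short-circuits `__init__` on later calls. A minimal standalone sketch of that pattern (class and attribute names below are illustrative, not part of the patch):

```python
class Singleton:
    """Process-wide singleton: __new__ hands back the shared instance,
    and an _initialized flag stops __init__ from re-running its body."""

    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self, value=None):
        if self._initialized:      # __init__ still runs on every construction,
            return                 # so bail out after the first time
        self.value = value
        self._initialized = True


a = Singleton(1)
b = Singleton(2)
assert a is b and a.value == 1     # the second constructor call is a no-op
```

The flag is needed because Python calls `__init__` on whatever `__new__` returns, every time the class is instantiated; note the pattern is not thread-safe without a lock.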
+ """ + # Skip initialization if already done + if self._initialized: + return + + try: + self._manager = FFmpegManager() + logger.info(f"FFmpeg initialized at {self._manager.get_ffmpeg_path()}") + logger.info(f"GPU support: {self._manager.gpu_info}") + self._initialized = True + except Exception as e: + logger.error(f"Failed to initialize FFmpeg interface: {e}") + raise FFmpegError(f"FFmpeg initialization failed: {e}") + + @property + def ffmpeg_path(self) -> Path: + """Get path to FFmpeg binary""" + return Path(self._manager.get_ffmpeg_path()) + + @property + def gpu_info(self) -> Dict[str, bool]: + """Get GPU information""" + return self._manager.gpu_info + + def analyze_video(self, input_path: str) -> Dict[str, Any]: + """Analyze video content + + Args: + input_path: Path to input video file + + Returns: + Dict containing video analysis results + """ + try: + if not os.path.exists(input_path): + raise FileNotFoundError(f"Input file not found: {input_path}") + return self._manager.analyze_video(input_path) + except Exception as e: + logger.error(f"Video analysis failed: {e}") + raise AnalysisError(f"Failed to analyze video: {e}") + + def get_compression_params(self, input_path: str, target_size_mb: int) -> Dict[str, str]: + """Get optimal compression parameters + + Args: + input_path: Path to input video file + target_size_mb: Target file size in megabytes + + Returns: + Dict containing FFmpeg encoding parameters + """ + try: + if not os.path.exists(input_path): + raise FileNotFoundError(f"Input file not found: {input_path}") + return self._manager.get_compression_params(input_path, target_size_mb) + except Exception as e: + logger.error(f"Failed to get compression parameters: {e}") + raise EncodingError(f"Failed to get compression parameters: {e}") + + def force_download(self) -> bool: + """Force re-download of FFmpeg binary + + Returns: + bool: True if download successful, False otherwise + """ + try: + return self._manager.force_download() + except Exception as e: + logger.error(f"Force download failed: {e}") + raise DownloadError(f"Failed to force download FFmpeg: {e}") + + @property + def version(self) -> str: + """Get FFmpeg version""" + try: + import subprocess + result = subprocess.run( + [str(self.ffmpeg_path), "-version"], + capture_output=True, + text=True, + timeout=5 + ) + if result.returncode == 0: + return result.stdout.split()[2] + raise FFmpegError(f"FFmpeg version check failed: {result.stderr}") + except Exception as e: + logger.error(f"Failed to get FFmpeg version: {e}") + raise FFmpegError(f"Failed to get FFmpeg version: {e}") + +# Initialize default instance +try: + ffmpeg = FFmpeg() + logger.info(f"Default FFmpeg instance initialized (version {ffmpeg.version})") +except Exception as e: + logger.error(f"Failed to initialize default FFmpeg instance: {e}") + ffmpeg = None + +__all__ = [ + 'FFmpeg', + 'ffmpeg', + 'FFmpegManager', + 'VideoAnalyzer', + 'GPUDetector', + 'EncoderParams', + 'FFmpegDownloader', + 'FFmpegError', + 'DownloadError', + 'VerificationError', + 'EncodingError', + 'AnalysisError', + 'GPUError', + 'HardwareAccelerationError', + 'FFmpegNotFoundError', + 'FFprobeError', + 'CompressionError', + 'FormatError', + 'PermissionError', + 'TimeoutError', + 'ResourceError', + 'QualityError', + 'AudioError', + 'BitrateError', +] diff --git a/videoarchiver/ffmpeg/encoder_params.py b/videoarchiver/ffmpeg/encoder_params.py index d7f9951..e61a2b9 100644 --- a/videoarchiver/ffmpeg/encoder_params.py +++ b/videoarchiver/ffmpeg/encoder_params.py @@ -3,21 +3,87 @@ import 
os import logging from typing import Dict, Any +from .exceptions import CompressionError, QualityError, BitrateError logger = logging.getLogger("VideoArchiver") class EncoderParams: + """Manages FFmpeg encoding parameters based on hardware and content""" + + # Quality presets based on content type + QUALITY_PRESETS = { + "gaming": { + "crf": "20", + "preset": "fast", + "tune": "zerolatency", + "x264opts": "rc-lookahead=20:me=hex:subme=6:ref=3:b-adapt=1:direct=spatial" + }, + "animation": { + "crf": "18", + "preset": "slow", + "tune": "animation", + "x264opts": "rc-lookahead=60:me=umh:subme=9:ref=6:b-adapt=2:direct=auto:deblock=-1,-1" + }, + "film": { + "crf": "22", + "preset": "medium", + "tune": "film", + "x264opts": "rc-lookahead=50:me=umh:subme=8:ref=4:b-adapt=2:direct=auto" + } + } + def __init__(self, cpu_cores: int, gpu_info: Dict[str, bool]): + """Initialize encoder parameters manager + + Args: + cpu_cores: Number of available CPU cores + gpu_info: Dict containing GPU availability information + """ self.cpu_cores = cpu_cores self.gpu_info = gpu_info + logger.info(f"Initialized encoder with {cpu_cores} CPU cores and GPU info: {gpu_info}") def get_params(self, video_info: Dict[str, Any], target_size_bytes: int) -> Dict[str, str]: - """Get optimal FFmpeg parameters based on hardware and video analysis""" - params = self._get_base_params() - params.update(self._get_content_specific_params(video_info)) - params.update(self._get_gpu_specific_params()) - params.update(self._get_bitrate_params(video_info, target_size_bytes)) - return params + """Get optimal FFmpeg parameters based on hardware and video analysis + + Args: + video_info: Dict containing video analysis results + target_size_bytes: Target file size in bytes + + Returns: + Dict containing FFmpeg encoding parameters + """ + try: + # Get base parameters + params = self._get_base_params() + logger.debug(f"Base parameters: {params}") + + # Update with content-specific parameters + content_params = self._get_content_specific_params(video_info) + params.update(content_params) + logger.debug(f"Content-specific parameters: {content_params}") + + # Update with GPU-specific parameters if available + gpu_params = self._get_gpu_specific_params() + if gpu_params: + params.update(gpu_params) + logger.debug(f"GPU-specific parameters: {gpu_params}") + + # Calculate and update bitrate parameters + bitrate_params = self._get_bitrate_params(video_info, target_size_bytes) + params.update(bitrate_params) + logger.debug(f"Bitrate parameters: {bitrate_params}") + + # Validate final parameters + self._validate_params(params, video_info) + + logger.info(f"Final encoding parameters: {params}") + return params + + except Exception as e: + logger.error(f"Error generating encoding parameters: {str(e)}") + # Return safe default parameters + return self._get_safe_defaults() def _get_base_params(self) -> Dict[str, str]: """Get base encoding parameters""" @@ -39,13 +105,19 @@ class EncoderParams: """Get parameters optimized for specific content types""" params = {} - if video_info.get("has_high_motion"): + # Detect content type + content_type = self._detect_content_type(video_info) + if content_type in self.QUALITY_PRESETS: + params.update(self.QUALITY_PRESETS[content_type]) + + # Additional optimizations based on content analysis + if video_info.get("has_high_motion", False): params.update({ "tune": "grain", "x264opts": "rc-lookahead=60:me=umh:subme=7:ref=4:b-adapt=2:direct=auto:deblock=-1,-1:psy-rd=1.0:aq-strength=0.8" }) - if 
video_info.get("has_dark_scenes"): + if video_info.get("has_dark_scenes", False): x264opts = params.get("x264opts", "rc-lookahead=60:me=umh:subme=7:ref=4:b-adapt=2:direct=auto") params.update({ "x264opts": x264opts + ":aq-mode=3:aq-strength=1.0:deblock=1:1", @@ -56,7 +128,7 @@ class EncoderParams: def _get_gpu_specific_params(self) -> Dict[str, str]: """Get GPU-specific encoding parameters""" - if self.gpu_info["nvidia"]: + if self.gpu_info.get("nvidia", False): return { "c:v": "h264_nvenc", "preset": "p7", @@ -70,7 +142,7 @@ class EncoderParams: "max_muxing_queue_size": "1024", "gpu": "any" } - elif self.gpu_info["amd"]: + elif self.gpu_info.get("amd", False): return { "c:v": "h264_amf", "quality": "quality", @@ -80,7 +152,7 @@ class EncoderParams: "preanalysis": "1", "max_muxing_queue_size": "1024" } - elif self.gpu_info["intel"]: + elif self.gpu_info.get("intel", False): return { "c:v": "h264_qsv", "preset": "veryslow", @@ -94,15 +166,16 @@ class EncoderParams: """Calculate and get bitrate-related parameters""" params = {} try: - duration = video_info.get("duration", 0) - input_size = video_info.get("bitrate", 0) * duration / 8 # Estimate from bitrate + duration = float(video_info.get("duration", 0)) + input_size = int(video_info.get("bitrate", 0) * duration / 8) # Convert to bytes if duration > 0 and input_size > target_size_bytes: - video_size_target = int(target_size_bytes * 0.95) - total_bitrate = (video_size_target * 8) / duration + # Calculate target bitrates + video_size_target = int(target_size_bytes * 0.95) # Reserve 5% for container overhead + total_bitrate = int((video_size_target * 8) / duration) - # Audio bitrate calculation - audio_channels = video_info.get("audio_channels", 2) + # Calculate audio bitrate + audio_channels = int(video_info.get("audio_channels", 2)) min_audio_bitrate = 64000 * audio_channels max_audio_bitrate = 192000 * audio_channels audio_bitrate = min( @@ -110,8 +183,10 @@ class EncoderParams: max(min_audio_bitrate, int(total_bitrate * 0.15)) ) - # Video bitrate calculation - video_bitrate = int((video_size_target * 8) / duration - audio_bitrate) + # Calculate video bitrate + video_bitrate = int(total_bitrate - audio_bitrate) + if video_bitrate <= 0: + raise BitrateError("Calculated video bitrate is too low", total_bitrate, 0) # Set bitrate constraints params["maxrate"] = str(int(video_bitrate * 1.5)) @@ -120,42 +195,85 @@ class EncoderParams: # Quality adjustments based on compression ratio ratio = input_size / target_size_bytes if ratio > 4: - params["crf"] = "26" if params.get("c:v", "libx264") == "libx264" else "23" + params["crf"] = "26" params["preset"] = "faster" elif ratio > 2: - params["crf"] = "23" if params.get("c:v", "libx264") == "libx264" else "21" + params["crf"] = "23" params["preset"] = "medium" else: - params["crf"] = "20" if params.get("c:v", "libx264") == "libx264" else "19" + params["crf"] = "20" params["preset"] = "slow" - # Dark scene adjustments - if video_info.get("has_dark_scenes"): - if params.get("c:v", "libx264") == "libx264": - params["crf"] = str(max(18, int(params["crf"]) - 2)) - elif params.get("c:v") == "h264_nvenc": - params["cq:v"] = str(max(15, int(params.get("cq:v", "19")) - 2)) - # Audio settings params.update({ "c:a": "aac", "b:a": f"{int(audio_bitrate/1000)}k", "ar": str(video_info.get("audio_sample_rate", 48000)), - "ac": str(video_info.get("audio_channels", 2)) + "ac": str(audio_channels) }) except Exception as e: logger.error(f"Error calculating bitrates: {str(e)}") # Use safe default parameters - 
params.update({ - "crf": "23", - "preset": "medium", - "maxrate": f"{2 * 1024 * 1024}", # 2 Mbps - "bufsize": f"{4 * 1024 * 1024}", # 4 Mbps buffer - "c:a": "aac", - "b:a": "128k", - "ar": "48000", - "ac": "2" - }) + params.update(self._get_safe_defaults()) return params + + def _detect_content_type(self, video_info: Dict[str, Any]) -> str: + """Detect content type based on video analysis""" + try: + # Check for gaming content + if video_info.get("has_high_motion", False) and video_info.get("fps", 0) >= 60: + return "gaming" + + # Check for animation + if video_info.get("has_sharp_edges", False) and not video_info.get("has_film_grain", False): + return "animation" + + # Default to film + return "film" + + except Exception as e: + logger.error(f"Error detecting content type: {str(e)}") + return "film" + + def _validate_params(self, params: Dict[str, str], video_info: Dict[str, Any]) -> None: + """Validate encoding parameters""" + try: + # Check for required parameters + required_params = ["c:v", "preset", "pix_fmt"] + missing_params = [p for p in required_params if p not in params] + if missing_params: + raise ValueError(f"Missing required parameters: {missing_params}") + + # Validate video codec + if params["c:v"] not in ["libx264", "h264_nvenc", "h264_amf", "h264_qsv"]: + raise ValueError(f"Invalid video codec: {params['c:v']}") + + # Validate preset + valid_presets = ["ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow"] + if params["preset"] not in valid_presets: + raise ValueError(f"Invalid preset: {params['preset']}") + + # Validate pixel format + if params["pix_fmt"] not in ["yuv420p", "nv12", "yuv444p"]: + raise ValueError(f"Invalid pixel format: {params['pix_fmt']}") + + except Exception as e: + logger.error(f"Parameter validation failed: {str(e)}") + raise + + def _get_safe_defaults(self) -> Dict[str, str]: + """Get safe default encoding parameters""" + return { + "c:v": "libx264", + "preset": "medium", + "crf": "23", + "pix_fmt": "yuv420p", + "profile:v": "high", + "level": "4.1", + "c:a": "aac", + "b:a": "128k", + "ar": "48000", + "ac": "2" + } diff --git a/videoarchiver/ffmpeg/exceptions.py b/videoarchiver/ffmpeg/exceptions.py index ea768bb..a77fe06 100644 --- a/videoarchiver/ffmpeg/exceptions.py +++ b/videoarchiver/ffmpeg/exceptions.py @@ -4,10 +4,106 @@ class FFmpegError(Exception): """Base exception for FFmpeg-related errors""" pass -class GPUError(FFmpegError): - """Raised when GPU operations fail""" +class DownloadError(FFmpegError): + """Exception raised when FFmpeg download fails""" pass -class DownloadError(FFmpegError): - """Raised when FFmpeg download fails""" +class VerificationError(FFmpegError): + """Exception raised when FFmpeg verification fails""" pass + +class EncodingError(FFmpegError): + """Exception raised when video encoding fails""" + pass + +class AnalysisError(FFmpegError): + """Exception raised when video analysis fails""" + pass + +class GPUError(FFmpegError): + """Exception raised when GPU operations fail""" + pass + +class HardwareAccelerationError(FFmpegError): + """Exception raised when hardware acceleration fails""" + def __init__(self, message: str, fallback_used: bool = False): + self.fallback_used = fallback_used + super().__init__(message) + +class FFmpegNotFoundError(FFmpegError): + """Exception raised when FFmpeg binary is not found""" + pass + +class FFprobeError(FFmpegError): + """Exception raised when FFprobe operations fail""" + pass + +class CompressionError(FFmpegError): + """Exception raised 
when video compression fails""" + def __init__(self, message: str, input_size: int, target_size: int): + self.input_size = input_size + self.target_size = target_size + super().__init__(f"{message} (Input: {input_size}B, Target: {target_size}B)") + +class FormatError(FFmpegError): + """Exception raised when video format is invalid or unsupported""" + pass + +class PermissionError(FFmpegError): + """Exception raised when file permissions prevent operations""" + pass + +class TimeoutError(FFmpegError): + """Exception raised when FFmpeg operations timeout""" + pass + +class ResourceError(FFmpegError): + """Exception raised when system resources are insufficient""" + def __init__(self, message: str, resource_type: str): + self.resource_type = resource_type + super().__init__(f"{message} (Resource: {resource_type})") + +class QualityError(FFmpegError): + """Exception raised when video quality requirements cannot be met""" + def __init__(self, message: str, target_quality: int, achieved_quality: int): + self.target_quality = target_quality + self.achieved_quality = achieved_quality + super().__init__( + f"{message} (Target: {target_quality}p, Achieved: {achieved_quality}p)" + ) + +class AudioError(FFmpegError): + """Exception raised when audio processing fails""" + pass + +class BitrateError(FFmpegError): + """Exception raised when bitrate requirements cannot be met""" + def __init__(self, message: str, target_bitrate: int, actual_bitrate: int): + self.target_bitrate = target_bitrate + self.actual_bitrate = actual_bitrate + super().__init__( + f"{message} (Target: {target_bitrate}bps, Actual: {actual_bitrate}bps)" + ) + +def handle_ffmpeg_error(error_output: str) -> FFmpegError: + """Convert FFmpeg error output to appropriate exception""" + error_output = error_output.lower() + + if "no such file" in error_output: + return FFmpegNotFoundError("FFmpeg binary not found") + elif "permission denied" in error_output: + return PermissionError("Insufficient permissions") + elif "hardware acceleration" in error_output: + return HardwareAccelerationError("Hardware acceleration failed", fallback_used=True) + elif "invalid data" in error_output: + return FormatError("Invalid or corrupted video format") + elif "insufficient memory" in error_output: + return ResourceError("Insufficient memory", "memory") + elif "audio" in error_output: + return AudioError("Audio processing failed") + elif "bitrate" in error_output: + return BitrateError("Bitrate requirements not met", 0, 0) + elif "timeout" in error_output: + return TimeoutError("Operation timed out") + else: + return FFmpegError(f"FFmpeg operation failed: {error_output}") diff --git a/videoarchiver/ffmpeg/ffmpeg_downloader.py b/videoarchiver/ffmpeg/ffmpeg_downloader.py index 677ebee..4d60f95 100644 --- a/videoarchiver/ffmpeg/ffmpeg_downloader.py +++ b/videoarchiver/ffmpeg/ffmpeg_downloader.py @@ -8,9 +8,12 @@ import tarfile import zipfile import subprocess import tempfile +import platform +import hashlib from pathlib import Path from contextlib import contextmanager from typing import Optional +import time from .exceptions import DownloadError @@ -71,6 +74,9 @@ class FFmpegDownloader: self.machine = "aarch64" # Normalize ARM64 naming self.base_dir = base_dir self.ffmpeg_path = self.base_dir / self._get_binary_name() + + logger.info(f"Initialized FFmpeg downloader for {system}/{machine}") + logger.info(f"FFmpeg binary path: {self.ffmpeg_path}") def _get_binary_name(self) -> str: """Get the appropriate binary name for the current system""" @@ -87,37 +93,58 @@ 
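Review note on `exceptions.py` above: `handle_ffmpeg_error` returns the mapped exception rather than raising it, and the module now defines `PermissionError` and `TimeoutError` names that shadow the builtins for anything that star-imports it. A usage sketch for the helper (the `run_ffmpeg` wrapper is illustrative; the import path matches this patch):

```python
import subprocess
from typing import List

from videoarchiver.ffmpeg.exceptions import FFmpegError, handle_ffmpeg_error


def run_ffmpeg(cmd: List[str]) -> str:
    """Run FFmpeg and convert a non-zero exit into a typed exception."""
    result = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
    if result.returncode != 0:
        # handle_ffmpeg_error() maps stderr text to an exception instance
        raise handle_ffmpeg_error(result.stderr)
    return result.stdout


try:
    run_ffmpeg(["ffmpeg", "-i", "missing.mp4", "-f", "null", "-"])
except FFmpegError as exc:
    print(f"{type(exc).__name__}: {exc}")
```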
class FFmpegDownloader: raise DownloadError(f"Unsupported system/architecture: {self.system}/{self.machine}") def download(self) -> Path: - """Download and set up FFmpeg binary""" - try: - # Ensure base directory exists with proper permissions - self.base_dir.mkdir(parents=True, exist_ok=True) - os.chmod(str(self.base_dir), 0o777) + """Download and set up FFmpeg binary with retries""" + max_retries = 3 + retry_delay = 5 + last_error = None - # Clean up any existing file or directory - if self.ffmpeg_path.exists(): - if self.ffmpeg_path.is_dir(): - shutil.rmtree(str(self.ffmpeg_path)) - else: - self.ffmpeg_path.unlink() + for attempt in range(max_retries): + try: + logger.info(f"Download attempt {attempt + 1}/{max_retries}") + + # Ensure base directory exists with proper permissions + self.base_dir.mkdir(parents=True, exist_ok=True) + os.chmod(str(self.base_dir), 0o777) - with temp_path_context() as temp_dir: - # Download archive - archive_path = self._download_archive(temp_dir) - - # Extract FFmpeg binary - self._extract_binary(archive_path, temp_dir) - - # Set proper permissions - os.chmod(str(self.ffmpeg_path), 0o755) - - return self.ffmpeg_path + # Clean up any existing file or directory + if self.ffmpeg_path.exists(): + if self.ffmpeg_path.is_dir(): + shutil.rmtree(str(self.ffmpeg_path)) + else: + self.ffmpeg_path.unlink() - except Exception as e: - logger.error(f"Failed to download FFmpeg: {str(e)}") - raise DownloadError(str(e)) + with temp_path_context() as temp_dir: + # Download archive + archive_path = self._download_archive(temp_dir) + + # Verify download + if not self._verify_download(archive_path): + raise DownloadError("Downloaded file verification failed") + + # Extract FFmpeg binary + self._extract_binary(archive_path, temp_dir) + + # Set proper permissions + os.chmod(str(self.ffmpeg_path), 0o755) + + # Verify binary + if not self.verify(): + raise DownloadError("FFmpeg binary verification failed") + + logger.info(f"Successfully downloaded FFmpeg to {self.ffmpeg_path}") + return self.ffmpeg_path + + except Exception as e: + last_error = str(e) + logger.error(f"Download attempt {attempt + 1} failed: {last_error}") + if attempt < max_retries - 1: + time.sleep(retry_delay * (attempt + 1)) # Exponential backoff + continue + + raise DownloadError(f"All download attempts failed: {last_error}") def _download_archive(self, temp_dir: str) -> Path: - """Download FFmpeg archive""" + """Download FFmpeg archive with progress tracking""" url = self._get_download_url() archive_path = Path(temp_dir) / f"ffmpeg_archive{'.zip' if self.system == 'Windows' else '.tar.xz'}" @@ -126,14 +153,46 @@ class FFmpegDownloader: response = requests.get(url, stream=True, timeout=30) response.raise_for_status() + total_size = int(response.headers.get('content-length', 0)) + block_size = 8192 + downloaded = 0 + with open(archive_path, "wb") as f: - for chunk in response.iter_content(chunk_size=8192): + for chunk in response.iter_content(chunk_size=block_size): f.write(chunk) + downloaded += len(chunk) + if total_size > 0: + percent = (downloaded / total_size) * 100 + logger.debug(f"Download progress: {percent:.1f}%") return archive_path + except Exception as e: raise DownloadError(f"Failed to download FFmpeg: {str(e)}") + def _verify_download(self, archive_path: Path) -> bool: + """Verify downloaded archive integrity""" + try: + if not archive_path.exists(): + return False + + # Check file size + size = archive_path.stat().st_size + if size < 1000000: # Less than 1MB is suspicious + logger.error(f"Downloaded 
file too small: {size} bytes") + return False + + # Check file hash + with open(archive_path, 'rb') as f: + file_hash = hashlib.sha256(f.read()).hexdigest() + logger.debug(f"Archive hash: {file_hash}") + + return True + + except Exception as e: + logger.error(f"Download verification failed: {str(e)}") + return False + def _extract_binary(self, archive_path: Path, temp_dir: str): """Extract FFmpeg binary from archive""" logger.info("Extracting FFmpeg binary") @@ -182,7 +241,13 @@ class FFmpegDownloader: timeout=5 ) - return result.returncode == 0 + if result.returncode == 0: + version = result.stdout.decode().split('\n')[0] + logger.info(f"FFmpeg verification successful: {version}") + return True + else: + logger.error(f"FFmpeg verification failed: {result.stderr.decode()}") + return False except Exception as e: logger.error(f"FFmpeg verification failed: {e}") diff --git a/videoarchiver/ffmpeg/ffmpeg_manager.py b/videoarchiver/ffmpeg/ffmpeg_manager.py index 004efbf..a36f3b6 100644 --- a/videoarchiver/ffmpeg/ffmpeg_manager.py +++ b/videoarchiver/ffmpeg/ffmpeg_manager.py @@ -4,6 +4,7 @@ import os import platform import multiprocessing import logging +import subprocess from pathlib import Path from typing import Dict, Any, Optional @@ -21,6 +22,7 @@ class FFmpegManager: # Set up base directory in videoarchiver/bin module_dir = Path(__file__).parent.parent self.base_dir = module_dir / "bin" + logger.info(f"FFmpeg base directory: {self.base_dir}") # Initialize downloader self.downloader = FFmpegDownloader( @@ -31,6 +33,7 @@ class FFmpegManager: # Get or download FFmpeg self.ffmpeg_path = self._initialize_ffmpeg() + logger.info(f"Using FFmpeg from: {self.ffmpeg_path}") # Initialize components self.gpu_detector = GPUDetector(self.ffmpeg_path) @@ -40,35 +43,130 @@ class FFmpegManager: # Initialize encoder params self.encoder_params = EncoderParams(self._cpu_cores, self._gpu_info) + + # Verify FFmpeg functionality + self._verify_ffmpeg() + logger.info("FFmpeg manager initialized successfully") def _initialize_ffmpeg(self) -> Path: - """Initialize FFmpeg binary""" - # Verify existing FFmpeg if it exists - if self.downloader.ffmpeg_path.exists() and self.downloader.verify(): - logger.info(f"Using existing FFmpeg: {self.downloader.ffmpeg_path}") - return self.downloader.ffmpeg_path - - # Download and verify FFmpeg - logger.info("Downloading FFmpeg...") + """Initialize FFmpeg binary with proper error handling""" try: + # Verify existing FFmpeg if it exists + if self.downloader.ffmpeg_path.exists(): + logger.info(f"Found existing FFmpeg: {self.downloader.ffmpeg_path}") + if self.downloader.verify(): + return self.downloader.ffmpeg_path + else: + logger.warning("Existing FFmpeg is not functional, downloading new copy") + + # Download and verify FFmpeg + logger.info("Downloading FFmpeg...") ffmpeg_path = self.downloader.download() if not self.downloader.verify(): raise FFmpegError("Downloaded FFmpeg binary is not functional") + + # Set executable permissions + try: + if platform.system() != "Windows": + os.chmod(str(ffmpeg_path), 0o755) + except Exception as e: + logger.error(f"Failed to set FFmpeg permissions: {e}") + return ffmpeg_path + except Exception as e: logger.error(f"Failed to initialize FFmpeg: {e}") raise FFmpegError(f"Failed to initialize FFmpeg: {e}") + def _verify_ffmpeg(self) -> None: + """Verify FFmpeg functionality with comprehensive checks""" + try: + # Check FFmpeg version + version_cmd = [str(self.ffmpeg_path), "-version"] + result = subprocess.run( + version_cmd, + 
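Review note: `_verify_download` above computes a SHA-256 digest but only logs it, so the check effectively reduces to "file exists and is over 1 MB"; separately, the retry comment in `download()` says exponential backoff while `retry_delay * (attempt + 1)` grows linearly. If the download URLs stay pinned, the digest could be compared against a known value — a sketch, with the expected hash purely a placeholder and streamed hashing so a large archive is not read into memory at once:

```python
import hashlib
from pathlib import Path

# Placeholder -- a real digest for the pinned archive would go here.
EXPECTED_SHA256 = "0" * 64


def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks instead of one big read()."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


def verify_archive(path: Path) -> bool:
    if not path.exists() or path.stat().st_size < 1_000_000:
        return False
    return sha256_of(path) == EXPECTED_SHA256
```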
stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + timeout=10 + ) + if result.returncode != 0: + raise FFmpegError("FFmpeg version check failed") + logger.info(f"FFmpeg version: {result.stdout.split()[2]}") + + # Check FFmpeg capabilities + caps_cmd = [str(self.ffmpeg_path), "-hide_banner", "-encoders"] + result = subprocess.run( + caps_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + timeout=10 + ) + if result.returncode != 0: + raise FFmpegError("FFmpeg capabilities check failed") + + # Verify encoders + required_encoders = ["libx264"] + if self._gpu_info["nvidia"]: + required_encoders.append("h264_nvenc") + elif self._gpu_info["amd"]: + required_encoders.append("h264_amf") + elif self._gpu_info["intel"]: + required_encoders.append("h264_qsv") + + available_encoders = result.stdout.lower() + missing_encoders = [ + encoder for encoder in required_encoders + if encoder not in available_encoders + ] + + if missing_encoders: + logger.warning(f"Missing encoders: {', '.join(missing_encoders)}") + + logger.info("FFmpeg verification completed successfully") + + except subprocess.TimeoutExpired: + raise FFmpegError("FFmpeg verification timed out") + except Exception as e: + raise FFmpegError(f"FFmpeg verification failed: {e}") + def analyze_video(self, input_path: str) -> Dict[str, Any]: """Analyze video content for optimal encoding settings""" - return self.video_analyzer.analyze_video(input_path) + try: + if not os.path.exists(input_path): + raise FileNotFoundError(f"Input file not found: {input_path}") + return self.video_analyzer.analyze_video(input_path) + except Exception as e: + logger.error(f"Video analysis failed: {e}") + return {} def get_compression_params(self, input_path: str, target_size_mb: int) -> Dict[str, str]: """Get optimal compression parameters for the given input file""" - # Analyze video first - video_info = self.analyze_video(input_path) - # Get encoding parameters - return self.encoder_params.get_params(video_info, target_size_mb * 1024 * 1024) + try: + # Analyze video first + video_info = self.analyze_video(input_path) + if not video_info: + raise FFmpegError("Failed to analyze video") + + # Convert target size to bytes + target_size_bytes = target_size_mb * 1024 * 1024 + + # Get encoding parameters + params = self.encoder_params.get_params(video_info, target_size_bytes) + logger.info(f"Generated compression parameters: {params}") + return params + + except Exception as e: + logger.error(f"Failed to get compression parameters: {e}") + # Return safe default parameters + return { + "c:v": "libx264", + "preset": "medium", + "crf": "23", + "c:a": "aac", + "b:a": "128k" + } def get_ffmpeg_path(self) -> str: """Get path to FFmpeg binary""" diff --git a/videoarchiver/ffmpeg/gpu_detector.py b/videoarchiver/ffmpeg/gpu_detector.py index 168ad85..2957925 100644 --- a/videoarchiver/ffmpeg/gpu_detector.py +++ b/videoarchiver/ffmpeg/gpu_detector.py @@ -1,130 +1,271 @@ """GPU detection functionality for FFmpeg""" import os -import json import subprocess import logging +import platform +import re +from typing import Dict, List from pathlib import Path -from typing import Dict logger = logging.getLogger("VideoArchiver") class GPUDetector: def __init__(self, ffmpeg_path: Path): - self.ffmpeg_path = ffmpeg_path + """Initialize GPU detector + + Args: + ffmpeg_path: Path to FFmpeg binary + """ + self.ffmpeg_path = Path(ffmpeg_path) + if not self.ffmpeg_path.exists(): + raise FileNotFoundError(f"FFmpeg not found at {self.ffmpeg_path}") def 
detect_gpu(self) -> Dict[str, bool]: - """Detect available GPU and its capabilities""" - gpu_info = {"nvidia": False, "amd": False, "intel": False, "arm": False} + """Detect available GPU acceleration support + + Returns: + Dict containing boolean flags for each GPU type + """ + gpu_info = { + "nvidia": False, + "amd": False, + "intel": False + } try: - if os.name == "posix": # Linux/Unix - gpu_info.update(self._detect_linux_gpu()) - elif os.name == "nt": # Windows + # Check system-specific GPU detection first + system = platform.system().lower() + if system == "windows": gpu_info.update(self._detect_windows_gpu()) + elif system == "linux": + gpu_info.update(self._detect_linux_gpu()) + elif system == "darwin": + gpu_info.update(self._detect_macos_gpu()) + + # Verify GPU support in FFmpeg + gpu_info.update(self._verify_ffmpeg_gpu_support()) + + # Log detection results + detected_gpus = [name for name, detected in gpu_info.items() if detected] + if detected_gpus: + logger.info(f"Detected GPUs: {', '.join(detected_gpus)}") + else: + logger.info("No GPU acceleration support detected") + + return gpu_info except Exception as e: - logger.warning(f"GPU detection failed: {str(e)}") - - return gpu_info - - def _test_encoder(self, encoder: str) -> bool: - """Test if a specific encoder works""" - try: - test_cmd = [ - str(self.ffmpeg_path), - "-f", "lavfi", - "-i", "testsrc=duration=1:size=1280x720:rate=30", - "-c:v", encoder, - "-f", "null", - "-" - ] - result = subprocess.run( - test_cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - timeout=5 - ) - return result.returncode == 0 - except Exception: - return False - - def _detect_linux_gpu(self) -> Dict[str, bool]: - """Detect GPUs on Linux systems""" - gpu_info = {"nvidia": False, "amd": False, "intel": False, "arm": False} - - # Check for NVIDIA GPU - try: - nvidia_smi = subprocess.run( - ["nvidia-smi", "-q", "-d", "ENCODER"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - timeout=5 - ) - if nvidia_smi.returncode == 0 and b"Encoder" in nvidia_smi.stdout: - gpu_info["nvidia"] = self._test_encoder("h264_nvenc") - except (subprocess.TimeoutExpired, FileNotFoundError): - pass - - # Check for AMD GPU - try: - if os.path.exists("/dev/dri/renderD128"): - with open("/sys/class/drm/renderD128/device/vendor", "r") as f: - vendor = f.read().strip() - if vendor == "0x1002": # AMD vendor ID - gpu_info["amd"] = self._test_encoder("h264_amf") - except (IOError, OSError): - pass - - # Check for Intel GPU - try: - lspci = subprocess.run( - ["lspci"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - timeout=5 - ) - output = lspci.stdout.decode().lower() - if "intel" in output and ("vga" in output or "display" in output): - gpu_info["intel"] = self._test_encoder("h264_qsv") - except (subprocess.TimeoutExpired, FileNotFoundError): - pass - - # Check for ARM GPU - if os.uname().machine.startswith(("aarch64", "armv7")): - gpu_info["arm"] = True - - return gpu_info + logger.error(f"Error during GPU detection: {str(e)}") + return {"nvidia": False, "amd": False, "intel": False} def _detect_windows_gpu(self) -> Dict[str, bool]: - """Detect GPUs on Windows systems""" - gpu_info = {"nvidia": False, "amd": False, "intel": False, "arm": False} - + """Detect GPUs on Windows using PowerShell""" + gpu_info = {"nvidia": False, "amd": False, "intel": False} + try: # Use PowerShell to get GPU info - ps_command = "Get-WmiObject Win32_VideoController | ConvertTo-Json" - result = subprocess.run( - ["powershell", "-Command", ps_command], - capture_output=True, - 
text=True, - timeout=10 - ) + cmd = ["powershell", "-Command", "Get-WmiObject Win32_VideoController | Select-Object Name"] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=10) if result.returncode == 0: - gpu_data = json.loads(result.stdout) - if not isinstance(gpu_data, list): - gpu_data = [gpu_data] + output = result.stdout.lower() + gpu_info["nvidia"] = "nvidia" in output + gpu_info["amd"] = any(x in output for x in ["amd", "radeon"]) + gpu_info["intel"] = "intel" in output + + except Exception as e: + logger.error(f"Windows GPU detection failed: {str(e)}") + + return gpu_info - for gpu in gpu_data: - name = gpu.get("Name", "").lower() - if "nvidia" in name: - gpu_info["nvidia"] = self._test_encoder("h264_nvenc") - if "amd" in name or "radeon" in name: - gpu_info["amd"] = self._test_encoder("h264_amf") - if "intel" in name: - gpu_info["intel"] = self._test_encoder("h264_qsv") + def _detect_linux_gpu(self) -> Dict[str, bool]: + """Detect GPUs on Linux using lspci and other tools""" + gpu_info = {"nvidia": False, "amd": False, "intel": False} + + try: + # Try lspci first + try: + result = subprocess.run( + ["lspci", "-v"], + capture_output=True, + text=True, + timeout=10 + ) + if result.returncode == 0: + output = result.stdout.lower() + gpu_info["nvidia"] = "nvidia" in output + gpu_info["amd"] = any(x in output for x in ["amd", "radeon"]) + gpu_info["intel"] = "intel" in output + except FileNotFoundError: + pass + + # Check for NVIDIA using nvidia-smi + if not gpu_info["nvidia"]: + try: + result = subprocess.run( + ["nvidia-smi"], + capture_output=True, + timeout=10 + ) + gpu_info["nvidia"] = result.returncode == 0 + except FileNotFoundError: + pass + + # Check for AMD using rocm-smi + if not gpu_info["amd"]: + try: + result = subprocess.run( + ["rocm-smi"], + capture_output=True, + timeout=10 + ) + gpu_info["amd"] = result.returncode == 0 + except FileNotFoundError: + pass + + # Check for Intel using intel_gpu_top + if not gpu_info["intel"]: + try: + result = subprocess.run( + ["intel_gpu_top", "-L"], + capture_output=True, + timeout=10 + ) + gpu_info["intel"] = result.returncode == 0 + except FileNotFoundError: + pass except Exception as e: - logger.error(f"Error during Windows GPU detection: {str(e)}") + logger.error(f"Linux GPU detection failed: {str(e)}") + + return gpu_info + + def _detect_macos_gpu(self) -> Dict[str, bool]: + """Detect GPUs on macOS using system_profiler""" + gpu_info = {"nvidia": False, "amd": False, "intel": False} + + try: + cmd = ["system_profiler", "SPDisplaysDataType"] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=10) + + if result.returncode == 0: + output = result.stdout.lower() + gpu_info["nvidia"] = "nvidia" in output + gpu_info["amd"] = any(x in output for x in ["amd", "radeon"]) + gpu_info["intel"] = "intel" in output + + except Exception as e: + logger.error(f"macOS GPU detection failed: {str(e)}") + + return gpu_info + + def _verify_ffmpeg_gpu_support(self) -> Dict[str, bool]: + """Verify GPU support in FFmpeg installation""" + gpu_support = {"nvidia": False, "amd": False, "intel": False} + + try: + # Check FFmpeg encoders + cmd = [str(self.ffmpeg_path), "-hide_banner", "-encoders"] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=10) + + if result.returncode == 0: + output = result.stdout.lower() + + # Check for specific GPU encoders + gpu_support["nvidia"] = "h264_nvenc" in output + gpu_support["amd"] = "h264_amf" in output + gpu_support["intel"] = "h264_qsv" in output + + # Log 
available encoders + encoders = [] + if gpu_support["nvidia"]: + encoders.append("NVENC") + if gpu_support["amd"]: + encoders.append("AMF") + if gpu_support["intel"]: + encoders.append("QSV") + + if encoders: + logger.info(f"FFmpeg supports GPU encoders: {', '.join(encoders)}") + else: + logger.info("No GPU encoders available in FFmpeg") + + except Exception as e: + logger.error(f"FFmpeg GPU support verification failed: {str(e)}") + + return gpu_support + + def get_gpu_info(self) -> Dict[str, List[str]]: + """Get detailed GPU information + + Returns: + Dict containing lists of GPU names by type + """ + gpu_info = { + "nvidia": [], + "amd": [], + "intel": [] + } + + try: + system = platform.system().lower() + + if system == "windows": + cmd = ["powershell", "-Command", "Get-WmiObject Win32_VideoController | Select-Object Name"] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=10) + + if result.returncode == 0: + for line in result.stdout.splitlines(): + line = line.strip().lower() + if line: + if "nvidia" in line: + gpu_info["nvidia"].append(line) + elif any(x in line for x in ["amd", "radeon"]): + gpu_info["amd"].append(line) + elif "intel" in line: + gpu_info["intel"].append(line) + + elif system == "linux": + try: + result = subprocess.run( + ["lspci", "-v"], + capture_output=True, + text=True, + timeout=10 + ) + if result.returncode == 0: + for line in result.stdout.splitlines(): + if "vga" in line.lower() or "3d" in line.lower(): + if "nvidia" in line.lower(): + gpu_info["nvidia"].append(line.strip()) + elif any(x in line.lower() for x in ["amd", "radeon"]): + gpu_info["amd"].append(line.strip()) + elif "intel" in line.lower(): + gpu_info["intel"].append(line.strip()) + except FileNotFoundError: + pass + + elif system == "darwin": + cmd = ["system_profiler", "SPDisplaysDataType"] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=10) + + if result.returncode == 0: + current_gpu = None + for line in result.stdout.splitlines(): + line = line.strip().lower() + if "chipset model" in line: + if "nvidia" in line: + current_gpu = "nvidia" + gpu_info["nvidia"].append(line.split(":")[1].strip()) + elif any(x in line for x in ["amd", "radeon"]): + current_gpu = "amd" + gpu_info["amd"].append(line.split(":")[1].strip()) + elif "intel" in line: + current_gpu = "intel" + gpu_info["intel"].append(line.split(":")[1].strip()) + + except Exception as e: + logger.error(f"Error getting detailed GPU info: {str(e)}") return gpu_info diff --git a/videoarchiver/ffmpeg/video_analyzer.py b/videoarchiver/ffmpeg/video_analyzer.py index bb5b4f1..3bd0f6e 100644 --- a/videoarchiver/ffmpeg/video_analyzer.py +++ b/videoarchiver/ffmpeg/video_analyzer.py @@ -4,11 +4,12 @@ import os import subprocess import logging from pathlib import Path -from typing import Dict, Any +from typing import Dict, Any, Optional from contextlib import contextmanager import tempfile import shutil import json +import re logger = logging.getLogger("VideoArchiver") @@ -27,32 +28,69 @@ def temp_path_context(): class VideoAnalyzer: def __init__(self, ffmpeg_path: Path): - self.ffmpeg_path = ffmpeg_path + """Initialize video analyzer with FFmpeg path + + Args: + ffmpeg_path: Path to FFmpeg binary + """ + self.ffmpeg_path = Path(ffmpeg_path) + self.ffprobe_path = self.ffmpeg_path.parent / ( + "ffprobe.exe" if os.name == "nt" else "ffprobe" + ) + + # Verify paths exist + if not self.ffmpeg_path.exists(): + raise FileNotFoundError(f"FFmpeg not found at {self.ffmpeg_path}") + if not self.ffprobe_path.exists(): 
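Review note: `_verify_ffmpeg_gpu_support` above only checks that an encoder name appears in `ffmpeg -encoders`; a listed encoder can still fail at runtime when the driver or device is missing, which is exactly what the removed `_test_encoder` helper caught with a short `testsrc` encode. A sketch of the two checks side by side (assumes an `ffmpeg` binary on PATH; function names are illustrative):

```python
import subprocess


def encoder_listed(ffmpeg: str, encoder: str) -> bool:
    """Cheap check: is the encoder compiled into this FFmpeg build?"""
    out = subprocess.run([ffmpeg, "-hide_banner", "-encoders"],
                         capture_output=True, text=True, timeout=10)
    return out.returncode == 0 and encoder in out.stdout


def encoder_works(ffmpeg: str, encoder: str) -> bool:
    """Stronger check: encode one second of synthetic video, like the
    _test_encoder() helper this patch removes."""
    cmd = [ffmpeg, "-v", "error",
           "-f", "lavfi", "-i", "testsrc=duration=1:size=640x360:rate=30",
           "-c:v", encoder, "-f", "null", "-"]
    try:
        return subprocess.run(cmd, capture_output=True, timeout=15).returncode == 0
    except subprocess.TimeoutExpired:
        return False


for enc in ("libx264", "h264_nvenc", "h264_qsv", "h264_amf"):
    print(enc, encoder_listed("ffmpeg", enc), encoder_works("ffmpeg", enc))
```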
+ raise FileNotFoundError(f"FFprobe not found at {self.ffprobe_path}") + + logger.info(f"Initialized VideoAnalyzer with FFmpeg: {self.ffmpeg_path}, FFprobe: {self.ffprobe_path}") def analyze_video(self, input_path: str) -> Dict[str, Any]: - """Analyze video content for optimal encoding settings""" + """Analyze video content for optimal encoding settings + + Args: + input_path: Path to input video file + + Returns: + Dict containing video analysis results + """ try: + if not os.path.exists(input_path): + logger.error(f"Input file not found: {input_path}") + return {} + # Use ffprobe to get video information probe_result = self._probe_video(input_path) if not probe_result: + logger.error("Failed to probe video") return {} + # Get video stream info video_info = next( (s for s in probe_result["streams"] if s["codec_type"] == "video"), None ) if not video_info: + logger.error("No video stream found") return {} - # Get video properties - width = int(video_info.get("width", 0)) - height = int(video_info.get("height", 0)) - fps = eval(video_info.get("r_frame_rate", "30/1")) - duration = float(probe_result["format"].get("duration", 0)) - bitrate = float(probe_result["format"].get("bit_rate", 0)) + # Get video properties with validation + try: + width = int(video_info.get("width", 0)) + height = int(video_info.get("height", 0)) + fps = self._parse_frame_rate(video_info.get("r_frame_rate", "30/1")) + duration = float(probe_result["format"].get("duration", 0)) + bitrate = float(probe_result["format"].get("bit_rate", 0)) + except (ValueError, ZeroDivisionError) as e: + logger.error(f"Error parsing video properties: {e}") + return {} - # Advanced analysis + # Advanced analysis with progress logging + logger.info("Starting motion detection analysis...") has_high_motion = self._detect_high_motion(video_info) + + logger.info("Starting dark scene analysis...") has_dark_scenes = self._analyze_dark_scenes(input_path) # Get audio properties @@ -62,7 +100,7 @@ class VideoAnalyzer: ) audio_props = self._get_audio_properties(audio_info) - return { + result = { "width": width, "height": height, "fps": fps, @@ -70,9 +108,12 @@ class VideoAnalyzer: "bitrate": bitrate, "has_high_motion": has_high_motion, "has_dark_scenes": has_dark_scenes, - "has_complex_scenes": False, # Reserved for future use + "has_complex_scenes": self._detect_complex_scenes(video_info), **audio_props } + + logger.info(f"Video analysis complete: {result}") + return result except Exception as e: logger.error(f"Error analyzing video: {str(e)}") @@ -82,38 +123,71 @@ class VideoAnalyzer: """Use ffprobe to get video information""" try: cmd = [ - str(self.ffmpeg_path).replace('ffmpeg', 'ffprobe'), + str(self.ffprobe_path), "-v", "quiet", "-print_format", "json", "-show_format", "-show_streams", + "-show_frames", + "-read_intervals", "%+#10", # Only analyze first 10 frames for speed input_path ] + + logger.debug(f"Running ffprobe command: {' '.join(cmd)}") result = subprocess.run( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - text=True + text=True, + timeout=30 # Add timeout ) + if result.returncode == 0: - return json.loads(result.stdout) + try: + return json.loads(result.stdout) + except json.JSONDecodeError as e: + logger.error(f"Failed to parse ffprobe output: {e}") + else: + logger.error(f"FFprobe failed: {result.stderr}") + + except subprocess.TimeoutExpired: + logger.error("FFprobe timed out") except Exception as e: logger.error(f"Error probing video: {str(e)}") return {} - def _detect_high_motion(self, video_info: Dict) -> bool: - 
"""Detect high motion content based on frame rate analysis""" + def _parse_frame_rate(self, rate_str: str) -> float: + """Parse frame rate string to float""" try: - if video_info.get("avg_frame_rate"): - avg_fps = eval(video_info["avg_frame_rate"]) - fps = eval(video_info.get("r_frame_rate", "30/1")) - return abs(avg_fps - fps) > 5 # Significant frame rate variation + if "/" in rate_str: + num, den = map(float, rate_str.split("/")) + return num / den if den != 0 else 0 + return float(rate_str) + except (ValueError, ZeroDivisionError): + return 30.0 # Default to 30fps + + def _detect_high_motion(self, video_info: Dict) -> bool: + """Detect high motion content based on frame rate and codec parameters""" + try: + # Check frame rate variation + if video_info.get("avg_frame_rate") and video_info.get("r_frame_rate"): + avg_fps = self._parse_frame_rate(video_info["avg_frame_rate"]) + fps = self._parse_frame_rate(video_info["r_frame_rate"]) + if abs(avg_fps - fps) > 5: # Significant frame rate variation + return True + + # Check codec parameters for motion indicators + if "codec_tag_string" in video_info: + high_motion_codecs = ["avc1", "h264", "hevc"] + if any(codec in video_info["codec_tag_string"].lower() for codec in high_motion_codecs): + return True + except Exception as e: logger.warning(f"Frame rate analysis failed: {str(e)}") return False def _analyze_dark_scenes(self, input_path: str) -> bool: - """Analyze video for dark scenes""" + """Analyze video for dark scenes using FFmpeg signalstats filter""" try: with temp_path_context() as temp_dir: sample_cmd = [ @@ -122,40 +196,77 @@ class VideoAnalyzer: "-vf", "select='eq(pict_type,I)',signalstats", "-show_entries", "frame_tags=lavfi.signalstats.YAVG", "-f", "null", + "-t", "30", # Only analyze first 30 seconds "-" ] + + logger.debug(f"Running dark scene analysis: {' '.join(sample_cmd)}") result = subprocess.run( sample_cmd, capture_output=True, - text=True + text=True, + timeout=60 # Add timeout ) dark_frames = 0 total_frames = 0 for line in result.stderr.split("\n"): if "YAVG" in line: - avg_brightness = float(line.split("=")[1]) - if avg_brightness < 40: # Dark scene threshold - dark_frames += 1 - total_frames += 1 + try: + avg_brightness = float(line.split("=")[1]) + if avg_brightness < 40: # Dark scene threshold + dark_frames += 1 + total_frames += 1 + except (ValueError, IndexError): + continue return total_frames > 0 and (dark_frames / total_frames) > 0.2 + except subprocess.TimeoutExpired: + logger.warning("Dark scene analysis timed out") except Exception as e: logger.warning(f"Dark scene analysis failed: {str(e)}") - return False + return False - def _get_audio_properties(self, audio_info: Dict) -> Dict[str, Any]: + def _detect_complex_scenes(self, video_info: Dict) -> bool: + """Detect complex scenes based on codec parameters and bitrate""" + try: + # Check for high profile/level + profile = video_info.get("profile", "").lower() + level = video_info.get("level", -1) + + if "high" in profile or level >= 41: # Level 4.1 or higher + return True + + # Check for high bitrate + if "bit_rate" in video_info: + bitrate = int(video_info["bit_rate"]) + if bitrate > 4000000: # Higher than 4Mbps + return True + + except Exception as e: + logger.warning(f"Complex scene detection failed: {str(e)}") + return False + + def _get_audio_properties(self, audio_info: Optional[Dict]) -> Dict[str, Any]: """Extract audio properties from stream info""" if not audio_info: return { - "audio_bitrate": 0, + "audio_bitrate": 128000, # Default to 128kbps 
"audio_channels": 2, "audio_sample_rate": 48000 } - return { - "audio_bitrate": int(audio_info.get("bit_rate", 0)), - "audio_channels": int(audio_info.get("channels", 2)), - "audio_sample_rate": int(audio_info.get("sample_rate", 48000)) - } + try: + return { + "audio_bitrate": int(audio_info.get("bit_rate", 128000)), + "audio_channels": int(audio_info.get("channels", 2)), + "audio_sample_rate": int(audio_info.get("sample_rate", 48000)) + } + except (ValueError, TypeError) as e: + logger.warning(f"Error parsing audio properties: {e}") + return { + "audio_bitrate": 128000, + "audio_channels": 2, + "audio_sample_rate": 48000 + } diff --git a/videoarchiver/processor.py b/videoarchiver/processor.py index 1b49f71..73153a3 100644 --- a/videoarchiver/processor.py +++ b/videoarchiver/processor.py @@ -2,11 +2,12 @@ import discord import logging +import asyncio +import ffmpeg import yt_dlp import re import os -from typing import List, Optional, Tuple, Callable, Any -import asyncio +from typing import Dict, List, Optional, Tuple, Callable, Any import traceback from datetime import datetime from pathlib import Path @@ -18,27 +19,38 @@ from videoarchiver.enhanced_queue import EnhancedVideoQueueManager logger = logging.getLogger("VideoArchiver") - class VideoProcessor: """Handles video processing operations""" - def __init__(self, bot, config_manager, components): + def __init__( + self, + bot, + config_manager, + components, + queue_manager=None + ): self.bot = bot self.config = config_manager self.components = components - # Initialize enhanced queue manager with persistence and error recovery - data_dir = Path(os.path.dirname(__file__)) / "data" - data_dir.mkdir(parents=True, exist_ok=True) - queue_path = data_dir / "queue_state.json" - self.queue_manager = EnhancedVideoQueueManager( - max_retries=3, - retry_delay=5, - max_queue_size=1000, - cleanup_interval=1800, # 30 minutes (reduced from 1 hour for more frequent cleanup) - max_history_age=86400, # 24 hours - persistence_path=str(queue_path) - ) + # Use provided queue manager or create new one + if queue_manager: + self.queue_manager = queue_manager + logger.info("Using provided queue manager") + else: + # Initialize enhanced queue manager with persistence and error recovery + data_dir = Path(os.path.dirname(__file__)) / "data" + data_dir.mkdir(parents=True, exist_ok=True) + queue_path = data_dir / "queue_state.json" + self.queue_manager = EnhancedVideoQueueManager( + max_retries=3, + retry_delay=5, + max_queue_size=1000, + cleanup_interval=1800, # 30 minutes + max_history_age=86400, # 24 hours + persistence_path=str(queue_path) + ) + logger.info("Created new queue manager") # Track failed downloads for cleanup self._failed_downloads = set() @@ -75,12 +87,20 @@ class VideoProcessor: try: settings = await self.config.get_guild_settings(guild_id) + logger.info(f"Got settings for guild {guild_id}: {settings}") # Download video with enhanced error handling try: - success, file_path, error = await self.components[guild_id][ - "downloader" - ].download_video(item.url) + if guild_id not in self.components: + return False, f"Components not initialized for guild {guild_id}" + + downloader = self.components[guild_id]["downloader"] + if not downloader: + return False, "Downloader not initialized" + + logger.info(f"Starting download for URL: {item.url}") + success, file_path, error = await downloader.download_video(item.url) + logger.info(f"Download result: success={success}, file_path={file_path}, error={error}") except Exception as e: logger.error(f"Download 
error: {traceback.format_exc()}") success, file_path, error = False, None, str(e) @@ -120,6 +140,7 @@ class VideoProcessor: try: # Upload to archive channel with original message link + logger.info(f"Uploading file to archive channel: {file_path}") file = discord.File(file_path) archive_message = await archive_channel.send( f"Original: {message.jump_url}", file=file @@ -296,14 +317,25 @@ class VideoProcessor: return # Find all video URLs in message with improved pattern matching - urls = self._extract_urls(message.content) + logger.info(f"Checking message {message.id} for video URLs...") + urls = [] + if message.guild.id in self.components: + downloader = self.components[message.guild.id]["downloader"] + if downloader: + for word in message.content.split(): + if downloader.is_supported_url(word): + urls.append(word) if urls: + logger.info(f"Found {len(urls)} video URLs in message {message.id}") # Process each URL with priority based on position for i, url in enumerate(urls): # First URL gets highest priority priority = len(urls) - i + logger.info(f"Processing URL {url} with priority {priority}") await self.process_video_url(url, message, priority) + else: + logger.info(f"No video URLs found in message {message.id}") except Exception as e: logger.error(f"Error processing message: {traceback.format_exc()}") @@ -311,27 +343,6 @@ class VideoProcessor: message.guild, f"Error processing message: {str(e)}", "error" ) - def _extract_urls(self, content: str) -> List[str]: - """Extract video URLs from message content with improved pattern matching""" - urls = [] - try: - # Create a YoutubeDL instance to get extractors - with yt_dlp.YoutubeDL() as ydl: - # Split content into words and check each for URLs - words = content.split() - for word in words: - # Try each extractor - for ie in ydl._ies: - if hasattr(ie, '_VALID_URL') and ie._VALID_URL: - # Use regex pattern matching instead of suitable() - if re.match(ie._VALID_URL, word): - logger.info(f"Found supported URL: {word} (Extractor: {ie.IE_NAME})") - urls.append(word) - break # Stop once we find a matching pattern - except Exception as e: - logger.error(f"URL extraction error: {str(e)}") - return list(set(urls)) # Remove duplicates - async def _log_message( self, guild: discord.Guild, message: str, level: str = "info" ): diff --git a/videoarchiver/utils/file_ops.py b/videoarchiver/utils/file_ops.py index ec4a812..f9d8f7f 100644 --- a/videoarchiver/utils/file_ops.py +++ b/videoarchiver/utils/file_ops.py @@ -4,83 +4,128 @@ import os import stat import time import logging +import shutil from datetime import datetime from pathlib import Path +from typing import Optional logger = logging.getLogger("VideoArchiver") def secure_delete_file(file_path: str, passes: int = 3, timeout: int = 30) -> bool: - """Securely delete a file by overwriting it multiple times before removal""" + """Securely delete a file by overwriting it multiple times before removal + + Args: + file_path: Path to the file to delete + passes: Number of overwrite passes (default: 3) + timeout: Maximum time in seconds to attempt deletion (default: 30) + + Returns: + bool: True if file was successfully deleted, False otherwise + """ if not os.path.exists(file_path): return True start_time = datetime.now() - while True: + try: + # Get file size before starting try: - # Ensure file is writable - try: - os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR) - except OSError: - pass - file_size = os.path.getsize(file_path) - for _ in range(passes): - with open(file_path, "wb") as f: - 
f.write(os.urandom(file_size)) - f.flush() - os.fsync(f.fileno()) + except OSError: + file_size = 0 + logger.warning(f"Could not get size of {file_path}, assuming 0") - # Try multiple deletion methods - try: - os.remove(file_path) - except OSError: + # Ensure file is writable + try: + current_mode = os.stat(file_path).st_mode + os.chmod(file_path, current_mode | stat.S_IWRITE) + except OSError as e: + logger.warning(f"Could not modify permissions of {file_path}: {e}") + + # Overwrite file content + if file_size > 0: + for pass_num in range(passes): try: - os.unlink(file_path) - except OSError: - Path(file_path).unlink(missing_ok=True) + with open(file_path, "wb") as f: + # Write random data + f.write(os.urandom(file_size)) + # Ensure data is written to disk + f.flush() + os.fsync(f.fileno()) + except OSError as e: + logger.warning(f"Error during pass {pass_num + 1} of overwriting {file_path}: {e}") + continue - # Verify file is gone - if os.path.exists(file_path): - # If file still exists, check timeout - if (datetime.now() - start_time).seconds > timeout: - logger.error(f"Timeout while trying to delete {file_path}") - return False - # Wait briefly before retry - time.sleep(0.1) - continue + # Try multiple deletion methods + deletion_methods = [ + lambda p: os.remove(p), + lambda p: os.unlink(p), + lambda p: Path(p).unlink(missing_ok=True), + lambda p: shutil.rmtree(p, ignore_errors=True) if os.path.isdir(p) else os.remove(p) + ] - return True - - except Exception as e: - logger.error(f"Error during secure delete of {file_path}: {str(e)}") - # Last resort: try force delete + for method in deletion_methods: try: if os.path.exists(file_path): - os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR) - Path(file_path).unlink(missing_ok=True) - except Exception as e2: - logger.error(f"Force delete failed: {str(e2)}") - return not os.path.exists(file_path) + method(file_path) + if not os.path.exists(file_path): + logger.debug(f"Successfully deleted {file_path}") + return True + except OSError as e: + logger.debug(f"Deletion method failed for {file_path}: {e}") + continue + + # If file still exists, check timeout + while os.path.exists(file_path): + if (datetime.now() - start_time).total_seconds() > timeout: + logger.error(f"Timeout while trying to delete {file_path}") + return False + time.sleep(0.1) + + return True + + except Exception as e: + logger.error(f"Error during secure deletion of {file_path}: {e}") + # Last resort: try force delete + try: + if os.path.exists(file_path): + os.chmod(file_path, stat.S_IWRITE | stat.S_IREAD) + Path(file_path).unlink(missing_ok=True) + except Exception as e2: + logger.error(f"Force delete failed for {file_path}: {e2}") + return not os.path.exists(file_path) def cleanup_downloads(download_path: str) -> None: - """Clean up the downloads directory without removing the directory itself""" + """Clean up the downloads directory + + Args: + download_path: Path to the downloads directory to clean + """ try: - if os.path.exists(download_path): - # Delete all files in the directory - for file_path in Path(download_path).glob("**/*"): - if file_path.is_file(): - try: - if not secure_delete_file(str(file_path)): - logger.error(f"Failed to delete file: {file_path}") - except Exception as e: - logger.error(f"Error deleting file {file_path}: {str(e)}") - - # Clean up empty subdirectories - for dir_path in sorted(Path(download_path).glob("**/*"), reverse=True): - if dir_path.is_dir(): - try: - dir_path.rmdir() # Will only remove if empty - except OSError: - pass # Directory not 
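Review note on `secure_delete_file`: overwriting in place is best-effort at most — on copy-on-write or journaling filesystems, and on SSDs with wear levelling, the old blocks can survive the overwrite passes, so `passes` should be treated as hygiene rather than a guarantee. Call-site sketch (paths are placeholders; the import path matches this patch):

```python
from videoarchiver.utils.file_ops import cleanup_downloads, secure_delete_file

# Best-effort removal of a single finished download
if not secure_delete_file("/tmp/videoarchiver/clip.mp4", passes=3, timeout=30):
    print("file survived secure deletion")

# Wipe remaining files, then prune empty subdirectories
cleanup_downloads("/tmp/videoarchiver")
```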
diff --git a/videoarchiver/utils/path_manager.py b/videoarchiver/utils/path_manager.py
index fc8e7dc..ccc585e 100644
--- a/videoarchiver/utils/path_manager.py
+++ b/videoarchiver/utils/path_manager.py
@@ -6,31 +6,73 @@ import shutil
 import stat
 import logging
 import contextlib
+import time
 
 logger = logging.getLogger("VideoArchiver")
 
 @contextlib.contextmanager
 def temp_path_context():
     """Context manager for temporary path creation and cleanup"""
-    temp_dir = tempfile.mkdtemp(prefix="videoarchiver_")
+    temp_dir = None
     try:
-        # Ensure proper permissions
+        # Create temp directory with proper permissions
+        temp_dir = tempfile.mkdtemp(prefix="videoarchiver_")
+        logger.debug(f"Created temporary directory: {temp_dir}")
+
+        # Ensure directory has rwx permissions for user only
         os.chmod(temp_dir, stat.S_IRWXU)
+
+        # Verify directory exists and is writable
+        if not os.path.exists(temp_dir):
+            raise OSError(f"Failed to create temporary directory: {temp_dir}")
+        if not os.access(temp_dir, os.W_OK):
+            raise OSError(f"Temporary directory is not writable: {temp_dir}")
+
         yield temp_dir
+
+    except Exception as e:
+        logger.error(f"Error in temp_path_context: {str(e)}")
+        raise
+
     finally:
-        try:
-            # Ensure all files are deletable
-            for root, dirs, files in os.walk(temp_dir):
-                for d in dirs:
+        if temp_dir and os.path.exists(temp_dir):
+            try:
+                # Ensure all files are deletable with retries
+                max_retries = 3
+                for attempt in range(max_retries):
                     try:
-                        os.chmod(os.path.join(root, d), stat.S_IRWXU)
-                    except OSError:
-                        pass
-                for f in files:
-                    try:
-                        os.chmod(os.path.join(root, f), stat.S_IRWXU)
-                    except OSError:
-                        pass
-            shutil.rmtree(temp_dir, ignore_errors=True)
-        except Exception as e:
-            logger.error(f"Error cleaning up temp directory {temp_dir}: {e}")
+                        # Set permissions recursively
+                        for root, dirs, files in os.walk(temp_dir):
+                            for d in dirs:
+                                try:
+                                    dir_path = os.path.join(root, d)
+                                    os.chmod(dir_path, stat.S_IRWXU)
+                                except OSError as e:
+                                    logger.warning(f"Failed to set permissions on directory {dir_path}: {e}")
+                            for f in files:
+                                try:
+                                    file_path = os.path.join(root, f)
+                                    os.chmod(file_path, stat.S_IRWXU)
+                                except OSError as e:
+                                    logger.warning(f"Failed to set permissions on file {file_path}: {e}")
+
+                        # Try to remove the directory
+                        shutil.rmtree(temp_dir, ignore_errors=True)
+
+                        # Verify directory is gone
+                        if not os.path.exists(temp_dir):
+                            logger.debug(f"Successfully cleaned up temporary directory: {temp_dir}")
+                            break
+
+                        if attempt < max_retries - 1:
+                            time.sleep(1)  # Wait before retry
+
+                    except Exception as e:
+                        if attempt == max_retries - 1:
+                            logger.error(f"Failed to clean up temporary directory {temp_dir} after {max_retries} attempts: {e}")
+                        elif attempt < max_retries - 1:
+                            time.sleep(1)  # Wait before retry
+                            continue
+
+            except Exception as e:
+                logger.error(f"Error during temp directory cleanup: {str(e)}")
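temp_path_context above is a generator-based context manager: it creates a user-only (0o700) scratch directory, verifies it is writable, and on exit retries the permission fix and rmtree up to three times. A typical call site would look roughly like the sketch below; the file name written into the directory is purely illustrative.

# Usage sketch for the context manager changed above; the file name is illustrative.
import os
from videoarchiver.utils.path_manager import temp_path_context

with temp_path_context() as temp_dir:
    scratch = os.path.join(temp_dir, "partial_download.mp4")  # hypothetical scratch file
    with open(scratch, "wb") as f:
        f.write(b"placeholder bytes")
# On exit the directory and its contents are removed, with retries and logging.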
diff --git a/videoarchiver/utils/video_downloader.py b/videoarchiver/utils/video_downloader.py
index 708d398..d9cac92 100644
--- a/videoarchiver/utils/video_downloader.py
+++ b/videoarchiver/utils/video_downloader.py
@@ -33,16 +33,26 @@ class VideoDownloader:
         enabled_sites: Optional[List[str]] = None,
         concurrent_downloads: int = 3,
     ):
-        self.download_path = download_path
+        # Ensure download path exists with proper permissions
+        self.download_path = Path(download_path)
+        self.download_path.mkdir(parents=True, exist_ok=True)
+        # Ensure directory has rwx permissions for user and rx for group/others
+        os.chmod(str(self.download_path), 0o755)
+        logger.info(f"Initialized download directory: {self.download_path}")
+
         self.video_format = video_format
         self.max_quality = max_quality
         self.max_file_size = max_file_size
         self.enabled_sites = enabled_sites
         self.url_patterns = self._get_url_patterns()
-
+        # Initialize FFmpeg manager
         self.ffmpeg_mgr = FFmpegManager()
-
+        ffmpeg_path = self.ffmpeg_mgr.get_ffmpeg_path()
+        if not os.path.exists(ffmpeg_path):
+            raise FileNotFoundError(f"FFmpeg not found at {ffmpeg_path}")
+        logger.info(f"Using FFmpeg from: {ffmpeg_path}")
+
         # Create thread pool for this instance
         self.download_pool = ThreadPoolExecutor(
             max_workers=max(1, min(5, concurrent_downloads)),
@@ -53,13 +63,14 @@ class VideoDownloader:
         self.active_downloads: Dict[str, str] = {}
         self._downloads_lock = asyncio.Lock()
 
-        # Configure yt-dlp options
+        # Configure yt-dlp options with absolute FFmpeg path
         self.ydl_opts = {
-            "format": f"bestvideo[height<={max_quality}]+bestaudio/best[height<={max_quality}]",
+            "format": f"bv*[height<={max_quality}][ext=mp4]+ba[ext=m4a]/b[height<={max_quality}]/best",  # More flexible format
             "outtmpl": "%(title)s.%(ext)s",  # Base filename only, path added later
             "merge_output_format": video_format,
-            "quiet": True,
-            "no_warnings": True,
+            "quiet": False,  # Enable output for debugging
+            "no_warnings": False,  # Show warnings
+            "verbose": True,  # Enable verbose output
             "extract_flat": False,
             "concurrent_fragment_downloads": concurrent_downloads,
             "retries": self.MAX_RETRIES,
@@ -68,8 +79,22 @@ class VideoDownloader:
             "extractor_retries": self.MAX_RETRIES,
             "postprocessor_hooks": [self._check_file_size],
             "progress_hooks": [self._progress_hook],
-            "ffmpeg_location": self.ffmpeg_mgr.get_ffmpeg_path(),
+            "ffmpeg_location": str(ffmpeg_path),  # Convert Path to string
+            "prefer_ffmpeg": True,  # Force use of FFmpeg
+            "hls_prefer_ffmpeg": True,  # Use FFmpeg for HLS
+            "logger": logger,  # Use our logger
+            "ignoreerrors": True,  # Don't stop on download errors
+            "no_color": True,  # Disable ANSI colors in output
+            "geo_bypass": True,  # Try to bypass geo-restrictions
+            "socket_timeout": 30,  # Increase timeout
+            "external_downloader": {
+                "m3u8": "ffmpeg",  # Use FFmpeg for m3u8 downloads
+            },
+            "external_downloader_args": {
+                "ffmpeg": ["-v", "warning"],  # Reduce FFmpeg verbosity
+            }
         }
+        logger.info("VideoDownloader initialized successfully")
 
     def __del__(self):
         """Ensure thread pool is shutdown and files are cleaned up"""
@@ -83,7 +108,7 @@ class VideoDownloader:
                 self.active_downloads.clear()
 
                 # Shutdown thread pool
-                if hasattr(self, 'download_pool'):
+                if hasattr(self, "download_pool"):
                     self.download_pool.shutdown(wait=True)
             except Exception as e:
                 logger.error(f"Error during VideoDownloader cleanup: {str(e)}")
@@ -94,7 +119,7 @@ class VideoDownloader:
         try:
             with yt_dlp.YoutubeDL() as ydl:
                 for ie in ydl._ies:
-                    if hasattr(ie, '_VALID_URL') and ie._VALID_URL:
+                    if hasattr(ie, "_VALID_URL") and ie._VALID_URL:
                         if not self.enabled_sites or any(
                             site.lower() in ie.IE_NAME.lower()
                             for site in self.enabled_sites
@@ -120,21 +145,29 @@ class VideoDownloader:
         """Handle download progress"""
         if d["status"] == "finished":
             logger.info(f"Download completed: {d['filename']}")
+        elif d["status"] == "downloading":
+            try:
+                percent = d.get("_percent_str", "N/A")
+                speed = d.get("_speed_str", "N/A")
+                eta = d.get("_eta_str", "N/A")
+                logger.debug(f"Download progress: {percent} at {speed}, ETA: {eta}")
+            except Exception as e:
+                logger.debug(f"Error logging progress: {str(e)}")
 
     def _verify_video_file(self, file_path: str) -> bool:
         """Verify video file integrity"""
         try:
             probe = ffmpeg.probe(file_path)
             # Check if file has video stream
-            video_streams = [s for s in probe['streams'] if s['codec_type'] == 'video']
+            video_streams = [s for s in probe["streams"] if s["codec_type"] == "video"]
             if not video_streams:
                 raise VideoVerificationError("No video streams found")
             # Check if duration is valid
-            duration = float(probe['format'].get('duration', 0))
+            duration = float(probe["format"].get("duration", 0))
             if duration <= 0:
                 raise VideoVerificationError("Invalid video duration")
             # Check if file is readable
-            with open(file_path, 'rb') as f:
+            with open(file_path, "rb") as f:
                 f.seek(0, 2)  # Seek to end
                 if f.tell() == 0:
                     raise VideoVerificationError("Empty file")
@@ -148,12 +181,11 @@ class VideoDownloader:
         for attempt in range(self.MAX_RETRIES):
             try:
                 ydl_opts = self.ydl_opts.copy()
-                ydl_opts['outtmpl'] = os.path.join(temp_dir, ydl_opts['outtmpl'])
+                ydl_opts["outtmpl"] = os.path.join(temp_dir, ydl_opts["outtmpl"])
 
                 with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                     info = await asyncio.get_event_loop().run_in_executor(
-                        self.download_pool,
-                        lambda: ydl.extract_info(url, download=True)
+                        self.download_pool, lambda: ydl.extract_info(url, download=True)
                     )
 
                     if info is None:
@@ -171,7 +203,9 @@ class VideoDownloader:
             except Exception as e:
                 logger.error(f"Download attempt {attempt + 1} failed: {str(e)}")
                 if attempt < self.MAX_RETRIES - 1:
-                    await asyncio.sleep(self.RETRY_DELAY * (attempt + 1))  # Exponential backoff
+                    await asyncio.sleep(
+                        self.RETRY_DELAY * (attempt + 1)
+                    )  # Exponential backoff
                 else:
                     return False, "", f"All download attempts failed: {str(e)}"
@@ -206,7 +240,7 @@ class VideoDownloader:
             )
             compressed_file = os.path.join(
                 self.download_path,
-                f"compressed_{os.path.basename(original_file)}"
+                f"compressed_{os.path.basename(original_file)}",
             )
 
             # Configure ffmpeg with optimal parameters
@@ -225,11 +259,15 @@ class VideoDownloader:
             )
 
             if not os.path.exists(compressed_file):
-                raise FileNotFoundError("Compression completed but file not found")
+                raise FileNotFoundError(
+                    "Compression completed but file not found"
+                )
 
             # Verify compressed file
             if not self._verify_video_file(compressed_file):
-                raise VideoVerificationError("Compressed file is not a valid video")
+                raise VideoVerificationError(
+                    "Compressed file is not a valid video"
+                )
 
             compressed_size = os.path.getsize(compressed_file)
             if compressed_size <= (self.max_file_size * 1024 * 1024):
@@ -245,7 +283,9 @@ class VideoDownloader:
                 return False, "", f"Compression error: {str(e)}"
             else:
                 # Move file to final location
-                final_path = os.path.join(self.download_path, os.path.basename(original_file))
+                final_path = os.path.join(
+                    self.download_path, os.path.basename(original_file)
+                )
                 # Use safe move with retries
                 success = await self._safe_move_file(original_file, final_path)
                 if not success:
@@ -264,7 +304,11 @@ class VideoDownloader:
             try:
                 if original_file and os.path.exists(original_file):
                     await self._safe_delete_file(original_file)
-                if compressed_file and os.path.exists(compressed_file) and not compressed_file.startswith(self.download_path):
+                if (
+                    compressed_file
+                    and os.path.exists(compressed_file)
+                    and not compressed_file.startswith(self.download_path)
+                ):
                     await self._safe_delete_file(compressed_file)
             except Exception as e:
                 logger.error(f"Error during file cleanup: {str(e)}")
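One detail worth noting in the retry hunk above: the delay is self.RETRY_DELAY * (attempt + 1), which grows linearly even though the trailing comment reads "Exponential backoff". If exponential growth is actually intended, the sleep would need to double per attempt, roughly as in the sketch below; the RETRY_DELAY value is an assumption, since the real constant is defined outside this excerpt.

# Sketch of a genuinely exponential backoff; the constant value is an assumption.
import asyncio

RETRY_DELAY = 5  # seconds, assumed base delay

async def backoff(attempt: int) -> None:
    # attempt 0 -> 5s, attempt 1 -> 10s, attempt 2 -> 20s, ...
    await asyncio.sleep(RETRY_DELAY * (2 ** attempt))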
diff --git a/videoarchiver/video_archiver.py b/videoarchiver/video_archiver.py
index 344fbed..0d32589 100644
--- a/videoarchiver/video_archiver.py
+++ b/videoarchiver/video_archiver.py
@@ -57,10 +57,19 @@ class VideoArchiver(commands.Cog):
         # Clean existing downloads
         cleanup_downloads(str(self.download_path))
 
-        # Initialize components dict
+        # Initialize components dict first
         self.components: Dict[int, Dict[str, Any]] = {}
 
-        # Initialize queue manager
+        # Initialize components for existing guilds
+        for guild in self.bot.guilds:
+            try:
+                await self.initialize_guild_components(guild.id)
+            except Exception as e:
+                logger.error(f"Failed to initialize guild {guild.id}: {str(e)}")
+                # Continue initialization even if one guild fails
+                continue
+
+        # Initialize queue manager after components are ready
         queue_path = self.data_path / "queue_state.json"
         queue_path.parent.mkdir(parents=True, exist_ok=True)
         self.queue_manager = EnhancedVideoQueueManager(
@@ -72,18 +81,16 @@ class VideoArchiver(commands.Cog):
             persistence_path=str(queue_path)
         )
 
-        # Initialize other managers in correct order
+        # Initialize update checker
         self.update_checker = UpdateChecker(self.bot, self.config_manager)
-        self.processor = VideoProcessor(self.bot, self.config_manager, self.components)
-        # Initialize components for existing guilds
-        for guild in self.bot.guilds:
-            try:
-                await self.initialize_guild_components(guild.id)
-            except Exception as e:
-                logger.error(f"Failed to initialize guild {guild.id}: {str(e)}")
-                # Continue initialization even if one guild fails
-                continue
+        # Initialize processor with queue manager
+        self.processor = VideoProcessor(
+            self.bot,
+            self.config_manager,
+            self.components,
+            queue_manager=self.queue_manager
+        )
 
         # Start update checker
         await self.update_checker.start()
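The video_archiver.py hunk now calls VideoProcessor with queue_manager=self.queue_manager, so the processor's constructor has to accept that keyword. A minimal sketch of a compatible signature is below; every name other than queue_manager is taken from the call site, and the default value is an assumption rather than the actual implementation.

# Sketch of the constructor shape implied by the call site above; not the real implementation.
class VideoProcessor:
    def __init__(self, bot, config_manager, components, queue_manager=None):
        self.bot = bot
        self.config_manager = config_manager
        self.components = components
        self.queue_manager = queue_manager  # optional queue used to hand off videos for processing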