fix imports

commit fc40e994fe
parent 2160be84bb
Author: pacnpal
Date:   2024-11-15 00:02:09 +00:00


@@ -4,17 +4,12 @@ import logging
 import json
 import os
 import time
-import psutil
 from typing import Dict, Optional, Set, Tuple, Callable, Any, List, Union
 from datetime import datetime, timedelta
 import traceback
 from dataclasses import dataclass, asdict, field
-import weakref
 from pathlib import Path
-import aiofiles
-import aiofiles.os
 import sys
-import signal
 from concurrent.futures import ThreadPoolExecutor
 from functools import partial
 import tempfile
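Note that `psutil` is still referenced later in this same diff (`QueueMetrics.update` computes `psutil.Process().memory_info().rss`), so dropping the top-level import only stays safe if the call site imports it locally. A minimal sketch of such a guarded local import (helper name hypothetical, not from this commit):

def current_memory_mb() -> float:
    """Return this process's resident set size in MB.

    Falls back to 0.0 when psutil is not installed, so memory
    metrics stay best-effort instead of raising NameError.
    """
    try:
        import psutil  # imported lazily so the dependency stays optional
    except ImportError:
        return 0.0
    return psutil.Process().memory_info().rss / 1024 / 1024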
@@ -41,6 +36,11 @@ class QueueMetrics:
     total_failed: int = 0
     avg_processing_time: float = 0.0
     success_rate: float = 0.0
+    errors_by_type: Dict[str, int] = field(default_factory=dict)
+    last_error: Optional[str] = None
+    last_error_time: Optional[datetime] = None
+    last_cleanup: datetime = field(default_factory=datetime.utcnow)
+    retries: int = 0
     peak_memory_usage: float = 0.0
     errors_by_type: Dict[str, int] = field(default_factory=dict)
     last_error: Optional[str] = None
@@ -57,8 +57,10 @@ class QueueMetrics:
         if error:
             self.last_error = error
             self.last_error_time = datetime.utcnow()
-            error_type = error.split(':')[0] if ':' in error else error
-            self.errors_by_type[error_type] = self.errors_by_type.get(error_type, 0) + 1
+            error_type = error.split(":")[0] if ":" in error else error
+            self.errors_by_type[error_type] = (
+                self.errors_by_type.get(error_type, 0) + 1
+            )
 
         # Update processing times with sliding window
         self.processing_times.append(processing_time)
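The bucketing above keys errors by the text before the first colon, so an error formatted as "ConnectionError: timed out" counts under "ConnectionError". The same accounting in isolation, with hypothetical sample input:

from typing import Dict, List

def bucket_errors(errors: List[str]) -> Dict[str, int]:
    counts: Dict[str, int] = {}
    for error in errors:
        # Same rule as the diff: everything before the first ":" is the type
        error_type = error.split(":")[0] if ":" in error else error
        counts[error_type] = counts.get(error_type, 0) + 1
    return counts

print(bucket_errors(["ConnectionError: timed out", "ConnectionError: reset", "FormatError"]))
# {'ConnectionError': 2, 'FormatError': 1}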
@@ -66,21 +68,28 @@ class QueueMetrics:
             self.processing_times.pop(0)
 
         # Update average processing time
-        self.avg_processing_time = sum(self.processing_times) / len(self.processing_times) if self.processing_times else 0.0
+        self.avg_processing_time = (
+            sum(self.processing_times) / len(self.processing_times)
+            if self.processing_times
+            else 0.0
+        )
 
         # Update success rate
         self.success_rate = (
             (self.total_processed - self.total_failed) / self.total_processed
-            if self.total_processed > 0 else 0.0
+            if self.total_processed > 0
+            else 0.0
         )
 
         # Update peak memory usage
         current_memory = psutil.Process().memory_info().rss / 1024 / 1024  # MB
         self.peak_memory_usage = max(self.peak_memory_usage, current_memory)
 
+
 @dataclass
 class QueueItem:
     """Represents a video processing task in the queue"""
+
     url: str
     message_id: int
     channel_id: int
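The metrics update keeps a bounded list of recent processing times and recomputes the average on every sample. A compact, self-contained version of that sliding window (the window size is an assumption; the bound itself is not visible in this hunk):

from typing import List

class SlidingAverage:
    def __init__(self, window: int = 100) -> None:
        self.window = window      # assumed bound; the diff truncates the check
        self.times: List[float] = []

    def add(self, seconds: float) -> float:
        self.times.append(seconds)
        if len(self.times) > self.window:
            self.times.pop(0)     # drop the oldest sample, as in the diff
        return sum(self.times) / len(self.times)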
@@ -99,6 +108,7 @@ class QueueItem:
     processing_times: List[float] = field(default_factory=list)
     last_error_time: Optional[datetime] = None
 
+
 class EnhancedVideoQueueManager:
     """Enhanced queue manager with improved memory management and performance"""
@@ -110,7 +120,7 @@ class EnhancedVideoQueueManager:
         cleanup_interval: int = 3600,  # 1 hour
         max_history_age: int = 86400,  # 24 hours
         persistence_path: Optional[str] = None,
-        backup_interval: int = 300  # 5 minutes
+        backup_interval: int = 300,  # 5 minutes
     ):
         self.max_retries = max_retries
         self.retry_delay = retry_delay
@@ -171,7 +181,7 @@ class EnhancedVideoQueueManager:
         guild_id: int,
         author_id: int,
         callback: Callable[[str, bool, str], Any],
-        priority: int = 0
+        priority: int = 0,
     ) -> bool:
         """Add a video to the processing queue with priority support"""
         try:
@@ -191,7 +201,7 @@ class EnhancedVideoQueueManager:
                 guild_id=guild_id,
                 author_id=author_id,
                 added_at=datetime.utcnow(),
-                priority=priority
+                priority=priority,
             )
 
             # Add to tracking collections
@@ -218,7 +228,9 @@ class EnhancedVideoQueueManager:
             logger.error(f"Error adding video to queue: {traceback.format_exc()}")
             raise QueueError(f"Failed to add to queue: {str(e)}")
 
-    async def process_queue(self, processor: Callable[[QueueItem], Tuple[bool, Optional[str]]]):
+    async def process_queue(
+        self, processor: Callable[[QueueItem], Tuple[bool, Optional[str]]]
+    ):
         """Process items in the queue with the provided processor function
 
         Args:
@@ -266,15 +278,21 @@ class EnhancedVideoQueueManager:
                             item.status = "pending"
                             item.last_retry = datetime.utcnow()
                             self._queue.append(item)
-                            logger.warning(f"Retrying item: {item.url} (attempt {item.retry_count})")
+                            logger.warning(
+                                f"Retrying item: {item.url} (attempt {item.retry_count})"
+                            )
                         else:
                             self._failed[item.url] = item
-                            logger.error(f"Failed to process item after {self.max_retries} attempts: {item.url}")
+                            logger.error(
+                                f"Failed to process item after {self.max_retries} attempts: {item.url}"
+                            )
                             self._processing.pop(item.url, None)
 
             except Exception as e:
-                logger.error(f"Error processing item {item.url}: {traceback.format_exc()}")
+                logger.error(
+                    f"Error processing item {item.url}: {traceback.format_exc()}"
+                )
                 async with self._processing_lock:
                     item.status = "failed"
                     item.error = str(e)
@@ -300,7 +318,8 @@ class EnhancedVideoQueueManager:
         try:
             if self.persistence_path and (
                 not self._last_backup
-                or (datetime.utcnow() - self._last_backup).total_seconds() >= self.backup_interval
+                or (datetime.utcnow() - self._last_backup).total_seconds()
+                >= self.backup_interval
             ):
                 await self._persist_queue()
                 self._last_backup = datetime.utcnow()
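The guard above throttles persistence: a backup runs only when `backup_interval` seconds have passed since `_last_backup` (or when no backup exists yet). The same check in isolation, as a minimal sketch (helper name hypothetical):

from datetime import datetime
from typing import Optional

def backup_due(last_backup: Optional[datetime], interval_seconds: float) -> bool:
    # First run (no previous backup) always qualifies
    if last_backup is None:
        return True
    return (datetime.utcnow() - last_backup).total_seconds() >= interval_seconds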
@@ -327,8 +346,12 @@ class EnhancedVideoQueueManager:
                 "success_rate": self.metrics.success_rate,
                 "errors_by_type": self.metrics.errors_by_type,
                 "last_error": self.metrics.last_error,
-                "last_error_time": self.metrics.last_error_time.isoformat() if self.metrics.last_error_time else None
-            }
+                "last_error_time": (
+                    self.metrics.last_error_time.isoformat()
+                    if self.metrics.last_error_time
+                    else None
+                ),
+            },
         }
 
         # Ensure directory exists
@@ -336,13 +359,13 @@ class EnhancedVideoQueueManager:
 
             # Write to temp file first
             temp_path = f"{self.persistence_path}.tmp"
-            async with aiofiles.open(temp_path, 'w') as f:
-                await f.write(json.dumps(state, default=str))
-                await f.flush()
+            with open(temp_path, "w") as f:
+                json.dump(state, f, default=str)
+                f.flush()
                 os.fsync(f.fileno())
 
             # Atomic rename
-            await aiofiles.os.rename(temp_path, self.persistence_path)
+            os.rename(temp_path, self.persistence_path)
 
         except Exception as e:
             logger.error(f"Error persisting queue state: {traceback.format_exc()}")
@@ -354,7 +377,7 @@ class EnhancedVideoQueueManager:
             return
 
         try:
-            with open(self.persistence_path, 'r') as f:
+            with open(self.persistence_path, "r") as f:
                 state = json.load(f)
 
             # Restore queue items with datetime conversion
@@ -365,7 +388,9 @@ class EnhancedVideoQueueManager:
                     item["last_retry"] = datetime.fromisoformat(item["last_retry"])
                 self._queue.append(QueueItem(**item))
 
-            self._processing = {k: QueueItem(**v) for k, v in state["processing"].items()}
+            self._processing = {
+                k: QueueItem(**v) for k, v in state["processing"].items()
+            }
             self._completed = {k: QueueItem(**v) for k, v in state["completed"].items()}
             self._failed = {k: QueueItem(**v) for k, v in state["failed"].items()}
@@ -377,20 +402,28 @@ class EnhancedVideoQueueManager:
             self.metrics.errors_by_type = state["metrics"]["errors_by_type"]
             self.metrics.last_error = state["metrics"]["last_error"]
             if state["metrics"]["last_error_time"]:
-                self.metrics.last_error_time = datetime.fromisoformat(state["metrics"]["last_error_time"])
+                self.metrics.last_error_time = datetime.fromisoformat(
+                    state["metrics"]["last_error_time"]
+                )
 
             logger.info("Successfully loaded persisted queue state")
         except Exception as e:
-            logger.error(f"Error loading persisted queue state: {traceback.format_exc()}")
+            logger.error(
+                f"Error loading persisted queue state: {traceback.format_exc()}"
+            )
             # Create backup of corrupted state file
             if os.path.exists(self.persistence_path):
                 backup_path = f"{self.persistence_path}.bak.{int(time.time())}"
                 try:
                     os.rename(self.persistence_path, backup_path)
-                    logger.info(f"Created backup of corrupted state file: {backup_path}")
+                    logger.info(
+                        f"Created backup of corrupted state file: {backup_path}"
+                    )
                 except Exception as be:
-                    logger.error(f"Failed to create backup of corrupted state file: {str(be)}")
+                    logger.error(
+                        f"Failed to create backup of corrupted state file: {str(be)}"
+                    )
 
     async def _monitor_health(self):
         """Monitor queue health and performance with improved metrics"""
@@ -404,6 +437,7 @@ class EnhancedVideoQueueManager:
                     logger.warning(f"High memory usage detected: {memory_usage:.2f}MB")
                     # Force garbage collection
                     import gc
+
                     gc.collect()
 
                 # Check for potential deadlocks
@@ -416,7 +450,9 @@ class EnhancedVideoQueueManager:
                 if processing_times:
                     max_time = max(processing_times)
                     if max_time > 3600:  # 1 hour
-                        logger.warning(f"Potential deadlock detected: Item processing for {max_time:.2f}s")
+                        logger.warning(
+                            f"Potential deadlock detected: Item processing for {max_time:.2f}s"
+                        )
                         # Attempt recovery
                         await self._recover_stuck_items()
@@ -447,7 +483,10 @@ class EnhancedVideoQueueManager:
         async with self._processing_lock:
             current_time = time.time()
             for url, item in list(self._processing.items()):
-                if item.processing_time > 0 and (current_time - item.processing_time) > 3600:
+                if (
+                    item.processing_time > 0
+                    and (current_time - item.processing_time) > 3600
+                ):
                     # Move to failed queue if max retries reached
                     if item.retry_count >= self.max_retries:
                         self._failed[url] = item
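Recovery treats any item marked in-progress for over an hour as stuck and either fails it outright (retries exhausted) or puts it back on the queue. A standalone sketch of that triage with a simplified item type; the re-queue branch is an assumption, since this hunk only shows the failed-queue path:

import time
from dataclasses import dataclass
from typing import Dict, List

@dataclass
class Item:
    url: str
    processing_time: float  # epoch seconds when processing began, 0 if unset
    retry_count: int

def triage_stuck(processing: Dict[str, Item], failed: Dict[str, Item],
                 queue: List[Item], max_retries: int,
                 timeout: float = 3600.0) -> None:
    now = time.time()
    for url, item in list(processing.items()):
        if item.processing_time > 0 and (now - item.processing_time) > timeout:
            if item.retry_count >= max_retries:
                failed[url] = processing.pop(url)   # give up: retries exhausted
            else:
                item.retry_count += 1
                queue.append(processing.pop(url))   # requeue for another attempt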
@@ -501,20 +540,23 @@ class EnhancedVideoQueueManager:
             guild_urls = self._guild_queues.get(guild_id, set())
             status = {
                 "pending": sum(1 for item in self._queue if item.url in guild_urls),
-                "processing": sum(1 for url in self._processing if url in guild_urls),
+                "processing": sum(
+                    1 for url in self._processing if url in guild_urls
+                ),
                 "completed": sum(1 for url in self._completed if url in guild_urls),
-                "failed": sum(1 for url in self._failed if url in guild_urls)
+                "failed": sum(1 for url in self._failed if url in guild_urls),
             }
         else:
             status = {
                 "pending": len(self._queue),
                 "processing": len(self._processing),
                 "completed": len(self._completed),
-                "failed": len(self._failed)
+                "failed": len(self._failed),
             }
 
         # Add detailed metrics
-        status.update({
+        status.update(
+            {
                 "metrics": {
                     "total_processed": self.metrics.total_processed,
                     "total_failed": self.metrics.total_failed,
@@ -524,10 +566,15 @@ class EnhancedVideoQueueManager:
                     "last_cleanup": self.metrics.last_cleanup.isoformat(),
                     "errors_by_type": self.metrics.errors_by_type,
                     "last_error": self.metrics.last_error,
-                    "last_error_time": self.metrics.last_error_time.isoformat() if self.metrics.last_error_time else None,
-                    "retries": self.metrics.retries
+                    "last_error_time": (
+                        self.metrics.last_error_time.isoformat()
+                        if self.metrics.last_error_time
+                        else None
+                    ),
+                    "retries": self.metrics.retries,
                 }
-        })
+            }
+        )
 
         return status
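The isoformat-or-None conditional now appears in both the persisted state and the status payload; a tiny helper would keep the two call sites consistent (hypothetical, not part of the commit):

from datetime import datetime
from typing import Optional

def iso_or_none(dt: Optional[datetime]) -> Optional[str]:
    # Serialize a datetime for JSON, passing None through unchanged
    return dt.isoformat() if dt else None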
@@ -558,13 +605,15 @@ class EnhancedVideoQueueManager:
             # Clean up guild and channel tracking
             for guild_id in list(self._guild_queues.keys()):
                 self._guild_queues[guild_id] = {
-                    url for url in self._guild_queues[guild_id]
+                    url
+                    for url in self._guild_queues[guild_id]
                     if url in self._queue or url in self._processing
                 }
 
             for channel_id in list(self._channel_queues.keys()):
                 self._channel_queues[channel_id] = {
-                    url for url in self._channel_queues[channel_id]
+                    url
+                    for url in self._channel_queues[channel_id]
                    if url in self._queue or url in self._processing
                 }
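The cleanup rebuilds each per-guild and per-channel set, keeping only URLs still pending or in flight. Note that `url in self._queue` compares a URL string against `QueueItem` objects, so as written it appears to match only through `_processing`; building an explicit set of active URLs avoids that pitfall and makes the membership test O(1). A sketch (`tracking` stands in for `_guild_queues`/`_channel_queues`; helper name hypothetical):

from typing import Dict, List, Set

def prune_tracking(tracking: Dict[int, Set[str]],
                   queue: List, processing: Dict[str, object]) -> None:
    # Collect the URLs still active once, then filter each tracked set
    active = {item.url for item in queue} | set(processing)
    for key in list(tracking):
        tracking[key] &= active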