fix imports

pacnpal
2024-11-15 00:02:09 +00:00
parent 2160be84bb
commit fc40e994fe

@@ -4,17 +4,12 @@ import logging
import json
import os
import time
import psutil
from typing import Dict, Optional, Set, Tuple, Callable, Any, List, Union
from datetime import datetime, timedelta
import traceback
from dataclasses import dataclass, asdict, field
import weakref
from pathlib import Path
import aiofiles
import aiofiles.os
import sys
import signal
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import tempfile
@@ -41,6 +36,11 @@ class QueueMetrics:
total_failed: int = 0
avg_processing_time: float = 0.0
success_rate: float = 0.0
errors_by_type: Dict[str, int] = field(default_factory=dict)
last_error: Optional[str] = None
last_error_time: Optional[datetime] = None
last_cleanup: datetime = field(default_factory=datetime.utcnow)
retries: int = 0
peak_memory_usage: float = 0.0
errors_by_type: Dict[str, int] = field(default_factory=dict)
last_error: Optional[str] = None
@@ -57,8 +57,10 @@ class QueueMetrics:
if error:
self.last_error = error
self.last_error_time = datetime.utcnow()
error_type = error.split(':')[0] if ':' in error else error
self.errors_by_type[error_type] = self.errors_by_type.get(error_type, 0) + 1
error_type = error.split(":")[0] if ":" in error else error
self.errors_by_type[error_type] = (
self.errors_by_type.get(error_type, 0) + 1
)
# Update processing times with sliding window
self.processing_times.append(processing_time)
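
For context on the error-tracking change in the hunk above: the update path buckets each error string by the text before its first colon. A minimal standalone sketch of that bucketing, with illustrative names that are not part of the commit:

```python
from typing import Dict

def bucket_error(errors_by_type: Dict[str, int], error: str) -> str:
    """Count an error under its type prefix, e.g. 'TimeoutError: ...' -> 'TimeoutError'."""
    error_type = error.split(":")[0] if ":" in error else error
    errors_by_type[error_type] = errors_by_type.get(error_type, 0) + 1
    return error_type

counts: Dict[str, int] = {}
bucket_error(counts, "TimeoutError: download took too long")
bucket_error(counts, "TimeoutError: ffmpeg stalled")
bucket_error(counts, "network unreachable")
# counts == {"TimeoutError": 2, "network unreachable": 1}
```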
@@ -66,21 +68,28 @@ class QueueMetrics:
self.processing_times.pop(0)
# Update average processing time
self.avg_processing_time = sum(self.processing_times) / len(self.processing_times) if self.processing_times else 0.0
self.avg_processing_time = (
sum(self.processing_times) / len(self.processing_times)
if self.processing_times
else 0.0
)
# Update success rate
self.success_rate = (
(self.total_processed - self.total_failed) / self.total_processed
if self.total_processed > 0 else 0.0
if self.total_processed > 0
else 0.0
)
# Update peak memory usage
current_memory = psutil.Process().memory_info().rss / 1024 / 1024 # MB
self.peak_memory_usage = max(self.peak_memory_usage, current_memory)
@dataclass
class QueueItem:
"""Represents a video processing task in the queue"""
url: str
message_id: int
channel_id: int
@@ -99,6 +108,7 @@ class QueueItem:
processing_times: List[float] = field(default_factory=list)
last_error_time: Optional[datetime] = None
class EnhancedVideoQueueManager:
"""Enhanced queue manager with improved memory management and performance"""
@@ -110,7 +120,7 @@ class EnhancedVideoQueueManager:
cleanup_interval: int = 3600, # 1 hour
max_history_age: int = 86400, # 24 hours
persistence_path: Optional[str] = None,
backup_interval: int = 300 # 5 minutes
backup_interval: int = 300, # 5 minutes
):
self.max_retries = max_retries
self.retry_delay = retry_delay
@@ -171,7 +181,7 @@ class EnhancedVideoQueueManager:
guild_id: int,
author_id: int,
callback: Callable[[str, bool, str], Any],
priority: int = 0
priority: int = 0,
) -> bool:
"""Add a video to the processing queue with priority support"""
try:
@@ -191,7 +201,7 @@ class EnhancedVideoQueueManager:
guild_id=guild_id,
author_id=author_id,
added_at=datetime.utcnow(),
priority=priority
priority=priority,
)
# Add to tracking collections
@@ -218,7 +228,9 @@ class EnhancedVideoQueueManager:
logger.error(f"Error adding video to queue: {traceback.format_exc()}")
raise QueueError(f"Failed to add to queue: {str(e)}")
async def process_queue(self, processor: Callable[[QueueItem], Tuple[bool, Optional[str]]]):
async def process_queue(
self, processor: Callable[[QueueItem], Tuple[bool, Optional[str]]]
):
"""Process items in the queue with the provided processor function
Args:
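
The reformatted signature above expects a processor of shape `Callable[[QueueItem], Tuple[bool, Optional[str]]]`: it takes a queue item and returns a success flag plus an optional error message. A hedged sketch of such a processor; `download_video` is a stand-in defined here only for illustration, and the real processor in this repo may be a coroutine:

```python
from typing import Optional, Tuple

def download_video(url: str) -> None:
    """Stand-in for the real download/processing step (illustrative only)."""
    if not url.startswith("http"):
        raise ValueError(f"unsupported url: {url}")

def example_processor(item) -> Tuple[bool, Optional[str]]:
    """Return (success, error_message), matching Callable[[QueueItem], Tuple[bool, Optional[str]]]."""
    try:
        download_video(item.url)
        return True, None
    except Exception as e:
        # "ValueError: unsupported url ..." feeds the errors_by_type bucketing above
        return False, f"{type(e).__name__}: {e}"

# await manager.process_queue(example_processor)  # manager instance assumed
```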
@@ -266,15 +278,21 @@ class EnhancedVideoQueueManager:
item.status = "pending"
item.last_retry = datetime.utcnow()
self._queue.append(item)
logger.warning(f"Retrying item: {item.url} (attempt {item.retry_count})")
logger.warning(
f"Retrying item: {item.url} (attempt {item.retry_count})"
)
else:
self._failed[item.url] = item
logger.error(f"Failed to process item after {self.max_retries} attempts: {item.url}")
logger.error(
f"Failed to process item after {self.max_retries} attempts: {item.url}"
)
self._processing.pop(item.url, None)
except Exception as e:
logger.error(f"Error processing item {item.url}: {traceback.format_exc()}")
logger.error(
f"Error processing item {item.url}: {traceback.format_exc()}"
)
async with self._processing_lock:
item.status = "failed"
item.error = str(e)
@@ -300,7 +318,8 @@ class EnhancedVideoQueueManager:
try:
if self.persistence_path and (
not self._last_backup
or (datetime.utcnow() - self._last_backup).total_seconds() >= self.backup_interval
or (datetime.utcnow() - self._last_backup).total_seconds()
>= self.backup_interval
):
await self._persist_queue()
self._last_backup = datetime.utcnow()
@@ -327,8 +346,12 @@ class EnhancedVideoQueueManager:
"success_rate": self.metrics.success_rate,
"errors_by_type": self.metrics.errors_by_type,
"last_error": self.metrics.last_error,
"last_error_time": self.metrics.last_error_time.isoformat() if self.metrics.last_error_time else None
}
"last_error_time": (
self.metrics.last_error_time.isoformat()
if self.metrics.last_error_time
else None
),
},
}
# Ensure directory exists
@@ -336,13 +359,13 @@ class EnhancedVideoQueueManager:
# Write to temp file first
temp_path = f"{self.persistence_path}.tmp"
async with aiofiles.open(temp_path, 'w') as f:
await f.write(json.dumps(state, default=str))
await f.flush()
with open(temp_path, "w") as f:
json.dump(state, f, default=str)
f.flush()
os.fsync(f.fileno())
# Atomic rename
await aiofiles.os.rename(temp_path, self.persistence_path)
os.rename(temp_path, self.persistence_path)
except Exception as e:
logger.error(f"Error persisting queue state: {traceback.format_exc()}")
@@ -354,7 +377,7 @@ class EnhancedVideoQueueManager:
return
try:
with open(self.persistence_path, 'r') as f:
with open(self.persistence_path, "r") as f:
state = json.load(f)
# Restore queue items with datetime conversion
@@ -365,7 +388,9 @@ class EnhancedVideoQueueManager:
item["last_retry"] = datetime.fromisoformat(item["last_retry"])
self._queue.append(QueueItem(**item))
self._processing = {k: QueueItem(**v) for k, v in state["processing"].items()}
self._processing = {
k: QueueItem(**v) for k, v in state["processing"].items()
}
self._completed = {k: QueueItem(**v) for k, v in state["completed"].items()}
self._failed = {k: QueueItem(**v) for k, v in state["failed"].items()}
@@ -377,20 +402,28 @@ class EnhancedVideoQueueManager:
self.metrics.errors_by_type = state["metrics"]["errors_by_type"]
self.metrics.last_error = state["metrics"]["last_error"]
if state["metrics"]["last_error_time"]:
self.metrics.last_error_time = datetime.fromisoformat(state["metrics"]["last_error_time"])
self.metrics.last_error_time = datetime.fromisoformat(
state["metrics"]["last_error_time"]
)
logger.info("Successfully loaded persisted queue state")
except Exception as e:
logger.error(f"Error loading persisted queue state: {traceback.format_exc()}")
logger.error(
f"Error loading persisted queue state: {traceback.format_exc()}"
)
# Create backup of corrupted state file
if os.path.exists(self.persistence_path):
backup_path = f"{self.persistence_path}.bak.{int(time.time())}"
try:
os.rename(self.persistence_path, backup_path)
logger.info(f"Created backup of corrupted state file: {backup_path}")
logger.info(
f"Created backup of corrupted state file: {backup_path}"
)
except Exception as be:
logger.error(f"Failed to create backup of corrupted state file: {str(be)}")
logger.error(
f"Failed to create backup of corrupted state file: {str(be)}"
)
async def _monitor_health(self):
"""Monitor queue health and performance with improved metrics"""
@@ -404,6 +437,7 @@ class EnhancedVideoQueueManager:
logger.warning(f"High memory usage detected: {memory_usage:.2f}MB")
# Force garbage collection
import gc
gc.collect()
# Check for potential deadlocks
@@ -416,7 +450,9 @@ class EnhancedVideoQueueManager:
if processing_times:
max_time = max(processing_times)
if max_time > 3600: # 1 hour
logger.warning(f"Potential deadlock detected: Item processing for {max_time:.2f}s")
logger.warning(
f"Potential deadlock detected: Item processing for {max_time:.2f}s"
)
# Attempt recovery
await self._recover_stuck_items()
@@ -447,7 +483,10 @@ class EnhancedVideoQueueManager:
async with self._processing_lock:
current_time = time.time()
for url, item in list(self._processing.items()):
if item.processing_time > 0 and (current_time - item.processing_time) > 3600:
if (
item.processing_time > 0
and (current_time - item.processing_time) > 3600
):
# Move to failed queue if max retries reached
if item.retry_count >= self.max_retries:
self._failed[url] = item
@@ -501,33 +540,41 @@ class EnhancedVideoQueueManager:
guild_urls = self._guild_queues.get(guild_id, set())
status = {
"pending": sum(1 for item in self._queue if item.url in guild_urls),
"processing": sum(1 for url in self._processing if url in guild_urls),
"processing": sum(
1 for url in self._processing if url in guild_urls
),
"completed": sum(1 for url in self._completed if url in guild_urls),
"failed": sum(1 for url in self._failed if url in guild_urls)
"failed": sum(1 for url in self._failed if url in guild_urls),
}
else:
status = {
"pending": len(self._queue),
"processing": len(self._processing),
"completed": len(self._completed),
"failed": len(self._failed)
"failed": len(self._failed),
}
# Add detailed metrics
status.update({
"metrics": {
"total_processed": self.metrics.total_processed,
"total_failed": self.metrics.total_failed,
"success_rate": self.metrics.success_rate,
"avg_processing_time": self.metrics.avg_processing_time,
"peak_memory_usage": self.metrics.peak_memory_usage,
"last_cleanup": self.metrics.last_cleanup.isoformat(),
"errors_by_type": self.metrics.errors_by_type,
"last_error": self.metrics.last_error,
"last_error_time": self.metrics.last_error_time.isoformat() if self.metrics.last_error_time else None,
"retries": self.metrics.retries
status.update(
{
"metrics": {
"total_processed": self.metrics.total_processed,
"total_failed": self.metrics.total_failed,
"success_rate": self.metrics.success_rate,
"avg_processing_time": self.metrics.avg_processing_time,
"peak_memory_usage": self.metrics.peak_memory_usage,
"last_cleanup": self.metrics.last_cleanup.isoformat(),
"errors_by_type": self.metrics.errors_by_type,
"last_error": self.metrics.last_error,
"last_error_time": (
self.metrics.last_error_time.isoformat()
if self.metrics.last_error_time
else None
),
"retries": self.metrics.retries,
}
}
})
)
return status
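
`get_queue_status()` (reformatted above) returns plain per-state counts plus a nested `"metrics"` dict. A hedged example of consuming that shape, based only on the keys visible in this hunk:

```python
def summarize_status(status: dict) -> str:
    """Format the dict returned by get_queue_status() (key names taken from the hunk above)."""
    m = status["metrics"]
    return (
        f"pending={status['pending']} processing={status['processing']} "
        f"completed={status['completed']} failed={status['failed']} | "
        f"success={m['success_rate']:.1%} avg={m['avg_processing_time']:.2f}s "
        f"peak_mem={m['peak_memory_usage']:.1f}MB retries={m['retries']}"
    )
```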
@@ -558,13 +605,15 @@ class EnhancedVideoQueueManager:
# Clean up guild and channel tracking
for guild_id in list(self._guild_queues.keys()):
self._guild_queues[guild_id] = {
url for url in self._guild_queues[guild_id]
url
for url in self._guild_queues[guild_id]
if url in self._queue or url in self._processing
}
for channel_id in list(self._channel_queues.keys()):
self._channel_queues[channel_id] = {
url for url in self._channel_queues[channel_id]
url
for url in self._channel_queues[channel_id]
if url in self._queue or url in self._processing
}