Mirror of https://github.com/pacnpal/Pac-cogs.git, synced 2025-12-20 10:51:05 -05:00
Adding robust datetime handling in _periodic_cleanup:

- Added try-except blocks around individual item processing
- Added proper string-to-datetime conversion with fallbacks
- Added cleanup of problematic items that can't be processed

Improved error handling:

- Added specific error logging for individual item failures
- Added graceful removal of problematic items
- Ensured cleanup continues even if some items fail

Added fallback mechanisms:

- Uses current_time as a fallback for invalid datetime values
- Removes items that can't be properly processed
- Prevents a single item failure from breaking the entire cleanup process
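At its core, the change replaces direct datetime.fromisoformat() calls with a conversion that never raises. A minimal sketch of that pattern (the helper name parse_datetime_with_fallback is illustrative, not code from this commit):

```python
from datetime import datetime

def parse_datetime_with_fallback(value, fallback=None):
    """Return a datetime for value, or fallback if it cannot be parsed."""
    if isinstance(value, datetime):
        return value  # already a datetime; nothing to convert
    if isinstance(value, str):
        try:
            return datetime.fromisoformat(value)
        except ValueError:
            return fallback  # malformed timestamp string
    return fallback  # None or an unexpected type
```

Called with fallback=datetime.utcnow() this mirrors what the commit does for added_at; with the default fallback=None it mirrors last_retry and last_error_time.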
@@ -515,31 +515,95 @@ class EnhancedVideoQueueManager:
         with open(self.persistence_path, "r") as f:
             state = json.load(f)
 
-            # Restore queue items with proper datetime conversion
-            self._queue = [QueueItem.from_dict(item) for item in state["queue"]]
-            self._processing = {k: QueueItem.from_dict(v) for k, v in state["processing"].items()}
-            self._completed = {k: QueueItem.from_dict(v) for k, v in state["completed"].items()}
-            self._failed = {k: QueueItem.from_dict(v) for k, v in state["failed"].items()}
-
-            # Restore metrics
-            metrics_data = state["metrics"]
-            self.metrics.total_processed = metrics_data["total_processed"]
-            self.metrics.total_failed = metrics_data["total_failed"]
-            self.metrics.avg_processing_time = metrics_data["avg_processing_time"]
-            self.metrics.success_rate = metrics_data["success_rate"]
-            self.metrics.errors_by_type = metrics_data["errors_by_type"]
-            self.metrics.last_error = metrics_data["last_error"]
-            self.metrics.compression_failures = metrics_data.get(
-                "compression_failures", 0
-            )
-            self.metrics.hardware_accel_failures = metrics_data.get(
-                "hardware_accel_failures", 0
-            )
-
-            if metrics_data["last_error_time"]:
-                self.metrics.last_error_time = datetime.fromisoformat(
-                    metrics_data["last_error_time"]
-                )
+            # Helper function to safely convert items
+            def safe_convert_item(item_data):
+                try:
+                    if isinstance(item_data, dict):
+                        # Ensure datetime fields are properly formatted
+                        if 'added_at' in item_data and item_data['added_at']:
+                            if isinstance(item_data['added_at'], str):
+                                try:
+                                    item_data['added_at'] = datetime.fromisoformat(item_data['added_at'])
+                                except ValueError:
+                                    item_data['added_at'] = datetime.utcnow()
+                            elif not isinstance(item_data['added_at'], datetime):
+                                item_data['added_at'] = datetime.utcnow()
+
+                        if 'last_retry' in item_data and item_data['last_retry']:
+                            if isinstance(item_data['last_retry'], str):
+                                try:
+                                    item_data['last_retry'] = datetime.fromisoformat(item_data['last_retry'])
+                                except ValueError:
+                                    item_data['last_retry'] = None
+                            elif not isinstance(item_data['last_retry'], datetime):
+                                item_data['last_retry'] = None
+
+                        if 'last_error_time' in item_data and item_data['last_error_time']:
+                            if isinstance(item_data['last_error_time'], str):
+                                try:
+                                    item_data['last_error_time'] = datetime.fromisoformat(item_data['last_error_time'])
+                                except ValueError:
+                                    item_data['last_error_time'] = None
+                            elif not isinstance(item_data['last_error_time'], datetime):
+                                item_data['last_error_time'] = None
+
+                        return QueueItem(**item_data)
+                    return None
+                except Exception as e:
+                    logger.error(f"Error converting queue item: {e}")
+                    return None
+
+            # Restore queue items with proper conversion
+            self._queue = []
+            for item in state.get("queue", []):
+                converted_item = safe_convert_item(item)
+                if converted_item:
+                    self._queue.append(converted_item)
+
+            # Restore processing items
+            self._processing = {}
+            for k, v in state.get("processing", {}).items():
+                converted_item = safe_convert_item(v)
+                if converted_item:
+                    self._processing[k] = converted_item
+
+            # Restore completed items
+            self._completed = {}
+            for k, v in state.get("completed", {}).items():
+                converted_item = safe_convert_item(v)
+                if converted_item:
+                    self._completed[k] = converted_item
+
+            # Restore failed items
+            self._failed = {}
+            for k, v in state.get("failed", {}).items():
+                converted_item = safe_convert_item(v)
+                if converted_item:
+                    self._failed[k] = converted_item
+
+            # Restore metrics with proper datetime handling
+            metrics_data = state.get("metrics", {})
+            self.metrics.total_processed = metrics_data.get("total_processed", 0)
+            self.metrics.total_failed = metrics_data.get("total_failed", 0)
+            self.metrics.avg_processing_time = metrics_data.get("avg_processing_time", 0.0)
+            self.metrics.success_rate = metrics_data.get("success_rate", 0.0)
+            self.metrics.errors_by_type = metrics_data.get("errors_by_type", {})
+            self.metrics.last_error = metrics_data.get("last_error")
+            self.metrics.compression_failures = metrics_data.get("compression_failures", 0)
+            self.metrics.hardware_accel_failures = metrics_data.get("hardware_accel_failures", 0)
+
+            # Handle metrics datetime fields
+            last_error_time = metrics_data.get("last_error_time")
+            if last_error_time:
+                try:
+                    if isinstance(last_error_time, str):
+                        self.metrics.last_error_time = datetime.fromisoformat(last_error_time)
+                    elif isinstance(last_error_time, datetime):
+                        self.metrics.last_error_time = last_error_time
+                    else:
+                        self.metrics.last_error_time = None
+                except ValueError:
+                    self.metrics.last_error_time = None
 
         logger.info("Successfully loaded persisted queue state")
 
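The restore logic above exists because JSON cannot store datetime objects: they are written out as strings and come back as strings. A quick round-trip sketch of the problem being handled (assuming the state file stores timestamps via isoformat(), the usual counterpart of fromisoformat()):

```python
import json
from datetime import datetime

# Datetimes are persisted as ISO-8601 strings...
saved = json.dumps({"added_at": datetime.utcnow().isoformat()})

# ...so after loading they must be converted back by hand.
loaded = json.loads(saved)
loaded["added_at"] = datetime.fromisoformat(loaded["added_at"])
```

Having safe_convert_item return None for anything unconvertible means a single corrupted entry is dropped rather than aborting the whole load, which is the trade-off the commit message describes.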
@@ -655,19 +719,43 @@ class EnhancedVideoQueueManager:
             # Clean up completed items
             for url in list(self._completed.keys()):
                 item = self._completed[url]
-                # Ensure added_at is a datetime object
-                if isinstance(item.added_at, str):
-                    item.added_at = datetime.fromisoformat(item.added_at)
-                if item.added_at < cleanup_cutoff:
-                    self._completed.pop(url)
+                try:
+                    # Ensure added_at is a datetime object
+                    if isinstance(item.added_at, str):
+                        try:
+                            item.added_at = datetime.fromisoformat(item.added_at)
+                        except ValueError:
+                            # If conversion fails, use current time to ensure item gets cleaned up
+                            item.added_at = current_time
+                    elif not isinstance(item.added_at, datetime):
+                        item.added_at = current_time
+
+                    if item.added_at < cleanup_cutoff:
+                        self._completed.pop(url)
+                except Exception as e:
+                    logger.error(f"Error processing completed item {url}: {e}")
+                    # Remove problematic item
+                    self._completed.pop(url)
 
             # Clean up failed items
             for url in list(self._failed.keys()):
                 item = self._failed[url]
-                # Ensure added_at is a datetime object
-                if isinstance(item.added_at, str):
-                    item.added_at = datetime.fromisoformat(item.added_at)
-                if item.added_at < cleanup_cutoff:
-                    self._failed.pop(url)
+                try:
+                    # Ensure added_at is a datetime object
+                    if isinstance(item.added_at, str):
+                        try:
+                            item.added_at = datetime.fromisoformat(item.added_at)
+                        except ValueError:
+                            # If conversion fails, use current time to ensure item gets cleaned up
+                            item.added_at = current_time
+                    elif not isinstance(item.added_at, datetime):
+                        item.added_at = current_time
+
+                    if item.added_at < cleanup_cutoff:
+                        self._failed.pop(url)
+                except Exception as e:
+                    logger.error(f"Error processing failed item {url}: {e}")
+                    # Remove problematic item
+                    self._failed.pop(url)
 
             # Clean up guild and channel tracking
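One nuance in the fallback above: assuming cleanup_cutoff is current_time minus a retention window, an item stamped with current_time compares as newer than the cutoff, so it survives this pass and ages out on a later one; only the except branch removes an item immediately. A small sketch of the comparison (the 24-hour window is an assumed value, not taken from this code):

```python
from datetime import datetime, timedelta

current_time = datetime.utcnow()
cleanup_cutoff = current_time - timedelta(hours=24)  # assumed retention window

# A freshly stamped fallback item is not older than the cutoff, so the
# age check leaves it in place until the window elapses.
should_remove = current_time < cleanup_cutoff  # False
```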