Converted all 'from videoarchiver.' imports to relative imports

This commit is contained in:
pacnpal
2024-11-17 16:05:12 +00:00
parent c830be2841
commit 62c97f0b01
69 changed files with 417 additions and 359 deletions

View File

@@ -8,7 +8,8 @@ import fcntl
import asyncio
from datetime import datetime, timedelta
from typing import Dict, Any, Optional
from videoarchiver.queue.models import QueueItem, QueueMetrics
from .models import QueueItem, QueueMetrics
# Configure logging
logging.basicConfig(
@@ -16,6 +17,7 @@ logging.basicConfig(
)
logger = logging.getLogger("QueuePersistence")
class QueuePersistenceManager:
"""Manages persistence of queue state to disk"""
@@ -25,10 +27,10 @@ class QueuePersistenceManager:
max_retries: int = 3,
retry_delay: int = 1,
backup_interval: int = 3600, # 1 hour
max_backups: int = 24 # Keep last 24 backups
max_backups: int = 24, # Keep last 24 backups
):
"""Initialize the persistence manager
Args:
persistence_path: Path to the persistence file
max_retries: Maximum number of retries for file operations
@@ -50,17 +52,17 @@ class QueuePersistenceManager:
processing: Dict[str, QueueItem],
completed: Dict[str, QueueItem],
failed: Dict[str, QueueItem],
metrics: QueueMetrics
metrics: QueueMetrics,
) -> None:
"""Persist queue state to disk with improved error handling
Args:
queue: List of pending queue items
processing: Dict of items currently being processed
completed: Dict of completed items
failed: Dict of failed items
metrics: Queue metrics object
Raises:
QueueError: If persistence fails
"""
@@ -87,14 +89,14 @@ class QueuePersistenceManager:
"compression_failures": metrics.compression_failures,
"hardware_accel_failures": metrics.hardware_accel_failures,
},
"timestamp": datetime.utcnow().isoformat()
"timestamp": datetime.utcnow().isoformat(),
}
# Ensure directory exists
os.makedirs(os.path.dirname(self.persistence_path), exist_ok=True)
# Acquire file lock
lock_fd = open(self._lock_file, 'w')
lock_fd = open(self._lock_file, "w")
fcntl.flock(lock_fd.fileno(), fcntl.LOCK_EX)
# Write with retries
@@ -120,7 +122,9 @@ class QueuePersistenceManager:
except Exception as e:
if attempt == self.max_retries - 1:
raise
logger.warning(f"Retry {attempt + 1}/{self.max_retries} failed: {e}")
logger.warning(
f"Retry {attempt + 1}/{self.max_retries} failed: {e}"
)
await asyncio.sleep(self.retry_delay)
except Exception as e:
@@ -140,18 +144,25 @@ class QueuePersistenceManager:
# Create backup
timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
backup_path = f"{self.persistence_path}.bak.{timestamp}"
with open(self.persistence_path, "rb") as src, open(backup_path, "wb") as dst:
with open(self.persistence_path, "rb") as src, open(
backup_path, "wb"
) as dst:
dst.write(src.read())
dst.flush()
os.fsync(dst.fileno())
# Clean old backups
backup_files = sorted([
f for f in os.listdir(os.path.dirname(self.persistence_path))
if f.startswith(os.path.basename(self.persistence_path) + ".bak.")
])
backup_files = sorted(
[
f
for f in os.listdir(os.path.dirname(self.persistence_path))
if f.startswith(os.path.basename(self.persistence_path) + ".bak.")
]
)
while len(backup_files) > self.max_backups:
old_backup = os.path.join(os.path.dirname(self.persistence_path), backup_files.pop(0))
old_backup = os.path.join(
os.path.dirname(self.persistence_path), backup_files.pop(0)
)
try:
os.remove(old_backup)
except Exception as e:
@@ -162,10 +173,10 @@ class QueuePersistenceManager:
def load_queue_state(self) -> Optional[Dict[str, Any]]:
"""Load persisted queue state from disk with retries
Returns:
Dict containing queue state if successful, None if file doesn't exist
Raises:
QueueError: If loading fails
"""
@@ -175,7 +186,7 @@ class QueuePersistenceManager:
lock_fd = None
try:
# Acquire file lock
lock_fd = open(self._lock_file, 'w')
lock_fd = open(self._lock_file, "w")
fcntl.flock(lock_fd.fileno(), fcntl.LOCK_EX)
# Try loading main file
@@ -188,18 +199,28 @@ class QueuePersistenceManager:
break
except Exception as e:
last_error = e
logger.warning(f"Retry {attempt + 1}/{self.max_retries} failed: {e}")
logger.warning(
f"Retry {attempt + 1}/{self.max_retries} failed: {e}"
)
time.sleep(self.retry_delay)
# If main file failed, try loading latest backup
if state is None:
backup_files = sorted([
f for f in os.listdir(os.path.dirname(self.persistence_path))
if f.startswith(os.path.basename(self.persistence_path) + ".bak.")
], reverse=True)
backup_files = sorted(
[
f
for f in os.listdir(os.path.dirname(self.persistence_path))
if f.startswith(
os.path.basename(self.persistence_path) + ".bak."
)
],
reverse=True,
)
if backup_files:
latest_backup = os.path.join(os.path.dirname(self.persistence_path), backup_files[0])
latest_backup = os.path.join(
os.path.dirname(self.persistence_path), backup_files[0]
)
try:
with open(latest_backup, "r") as f:
state = json.load(f)
@@ -207,7 +228,9 @@ class QueuePersistenceManager:
except Exception as e:
logger.error(f"Failed to load backup: {e}")
if last_error:
raise QueueError(f"Failed to load queue state: {last_error}")
raise QueueError(
f"Failed to load queue state: {last_error}"
)
raise
if state is None:
@@ -218,22 +241,34 @@ class QueuePersistenceManager:
try:
if isinstance(item_data, dict):
# Ensure datetime fields are properly formatted
for field in ['added_at', 'last_retry', 'last_error_time']:
for field in ["added_at", "last_retry", "last_error_time"]:
if field in item_data and item_data[field]:
if isinstance(item_data[field], str):
try:
item_data[field] = datetime.fromisoformat(item_data[field])
item_data[field] = datetime.fromisoformat(
item_data[field]
)
except ValueError:
item_data[field] = datetime.utcnow() if field == 'added_at' else None
item_data[field] = (
datetime.utcnow()
if field == "added_at"
else None
)
elif not isinstance(item_data[field], datetime):
item_data[field] = datetime.utcnow() if field == 'added_at' else None
item_data[field] = (
datetime.utcnow()
if field == "added_at"
else None
)
# Ensure processing_time is a float
if 'processing_time' in item_data:
if "processing_time" in item_data:
try:
item_data['processing_time'] = float(item_data['processing_time'])
item_data["processing_time"] = float(
item_data["processing_time"]
)
except (ValueError, TypeError):
item_data['processing_time'] = 0.0
item_data["processing_time"] = 0.0
return QueueItem(**item_data)
return None
@@ -283,15 +318,21 @@ class QueuePersistenceManager:
backup_path = f"{self.persistence_path}.corrupted.{int(time.time())}"
try:
os.rename(self.persistence_path, backup_path)
logger.info(f"Created backup of corrupted state file: {backup_path}")
logger.info(
f"Created backup of corrupted state file: {backup_path}"
)
except Exception as be:
logger.error(f"Failed to create backup of corrupted state file: {str(be)}")
logger.error(
f"Failed to create backup of corrupted state file: {str(be)}"
)
raise QueueError(f"Failed to load queue state: {str(e)}")
finally:
if lock_fd:
fcntl.flock(lock_fd.fileno(), fcntl.LOCK_UN)
lock_fd.close()
class QueueError(Exception):
    """Base exception for queue-related errors.

    Raised by QueuePersistenceManager when persisting or loading the
    queue state fails (e.g. unrecoverable I/O errors after all retries
    and backup-restore attempts are exhausted).
    """