Mirror of https://github.com/pacnpal/Pac-cogs.git
The "AttributeError: 'VideoProcessor' object has no attribute 'process_message'" error has been fixed by implementing the process_message method in the VideoProcessor class. This method now properly handles video processing requests from Discord messages.
The "TypeError: '<' not supported between instances of 'str' and 'datetime.datetime'" error has been fixed by implementing proper datetime handling in the QueueItem class. The added to_dict and from_dict methods ensure that datetime objects are correctly serialized and deserialized when saving and loading queue state.
@@ -117,6 +117,30 @@ class QueueItem:
     compression_attempted: bool = False
     original_message: Optional[Any] = None  # Store the original message reference
 
+    def to_dict(self) -> dict:
+        """Convert to dictionary with datetime handling"""
+        data = asdict(self)
+        # Convert datetime objects to ISO format strings
+        if self.added_at:
+            data['added_at'] = self.added_at.isoformat()
+        if self.last_retry:
+            data['last_retry'] = self.last_retry.isoformat()
+        if self.last_error_time:
+            data['last_error_time'] = self.last_error_time.isoformat()
+        return data
+
+    @classmethod
+    def from_dict(cls, data: dict) -> 'QueueItem':
+        """Create from dictionary with datetime handling"""
+        # Convert ISO format strings back to datetime objects
+        if 'added_at' in data and isinstance(data['added_at'], str):
+            data['added_at'] = datetime.fromisoformat(data['added_at'])
+        if 'last_retry' in data and isinstance(data['last_retry'], str):
+            data['last_retry'] = datetime.fromisoformat(data['last_retry'])
+        if 'last_error_time' in data and isinstance(data['last_error_time'], str):
+            data['last_error_time'] = datetime.fromisoformat(data['last_error_time'])
+        return cls(**data)
+
 
 class EnhancedVideoQueueManager:
     """Enhanced queue manager with improved memory management and performance"""
@@ -444,10 +468,10 @@ class EnhancedVideoQueueManager:
 
         try:
             state = {
-                "queue": [asdict(item) for item in self._queue],
-                "processing": {k: asdict(v) for k, v in self._processing.items()},
-                "completed": {k: asdict(v) for k, v in self._completed.items()},
-                "failed": {k: asdict(v) for k, v in self._failed.items()},
+                "queue": [item.to_dict() for item in self._queue],
+                "processing": {k: v.to_dict() for k, v in self._processing.items()},
+                "completed": {k: v.to_dict() for k, v in self._completed.items()},
+                "failed": {k: v.to_dict() for k, v in self._failed.items()},
                 "metrics": {
                     "total_processed": self.metrics.total_processed,
                     "total_failed": self.metrics.total_failed,
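The switch from asdict to to_dict in the saved state matters because dataclasses.asdict leaves datetime fields as datetime objects, which the default json encoder rejects (and any workaround that stringifies them on save without converting them back on load leads to the comparison error described above). A small illustration using a hypothetical stand-in dataclass rather than the real QueueItem:

import json
from dataclasses import dataclass, asdict
from datetime import datetime


@dataclass
class Item:  # hypothetical stand-in for QueueItem
    url: str
    added_at: datetime


item = Item("https://example.com/video.mp4", datetime.now())

try:
    json.dumps(asdict(item))  # asdict keeps the raw datetime object
except TypeError as err:
    print(err)  # Object of type datetime is not JSON serializable

data = asdict(item)
data["added_at"] = item.added_at.isoformat()  # what to_dict does for each datetime field
print(json.dumps(data))  # serializes cleanly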
@@ -491,23 +515,11 @@ class EnhancedVideoQueueManager:
             with open(self.persistence_path, "r") as f:
                 state = json.load(f)
 
-            # Restore queue items with datetime conversion
-            self._queue = []
-            for item in state["queue"]:
-                item["added_at"] = datetime.fromisoformat(item["added_at"])
-                if item.get("last_retry"):
-                    item["last_retry"] = datetime.fromisoformat(item["last_retry"])
-                if item.get("last_error_time"):
-                    item["last_error_time"] = datetime.fromisoformat(
-                        item["last_error_time"]
-                    )
-                self._queue.append(QueueItem(**item))
-
-            self._processing = {
-                k: QueueItem(**v) for k, v in state["processing"].items()
-            }
-            self._completed = {k: QueueItem(**v) for k, v in state["completed"].items()}
-            self._failed = {k: QueueItem(**v) for k, v in state["failed"].items()}
+            # Restore queue items with proper datetime conversion
+            self._queue = [QueueItem.from_dict(item) for item in state["queue"]]
+            self._processing = {k: QueueItem.from_dict(v) for k, v in state["processing"].items()}
+            self._completed = {k: QueueItem.from_dict(v) for k, v in state["completed"].items()}
+            self._failed = {k: QueueItem.from_dict(v) for k, v in state["failed"].items()}
 
             # Restore metrics
             metrics_data = state["metrics"]
@@ -91,6 +91,65 @@ class VideoProcessor:
             self._queue_task = asyncio.create_task(self.queue_manager.process_queue(self._process_video))
             logger.info("Video processing queue started successfully")
 
+    async def process_message(self, message: discord.Message) -> None:
+        """Process a message for video content"""
+        try:
+            # Check if message contains any video URLs
+            if not message.content and not message.attachments:
+                return
+
+            # Get guild settings
+            settings = await self.config.get_guild_settings(message.guild.id)
+            if not settings:
+                logger.warning(f"No settings found for guild {message.guild.id}")
+                return
+
+            # Check if channel is enabled
+            enabled_channels = settings.get("enabled_channels", [])
+            if enabled_channels and message.channel.id not in enabled_channels:
+                return
+
+            # Extract URLs from message content and attachments
+            urls = []
+            if message.content:
+                # Add URLs from message content
+                for word in message.content.split():
+                    if any(site in word.lower() for site in settings["enabled_sites"]):
+                        urls.append(word)
+
+            # Add attachment URLs
+            for attachment in message.attachments:
+                if any(attachment.filename.lower().endswith(ext) for ext in ['.mp4', '.mov', '.avi', '.webm']):
+                    urls.append(attachment.url)
+
+            if not urls:
+                return
+
+            # Add each URL to the queue
+            for url in urls:
+                try:
+                    await message.add_reaction(REACTIONS['queued'])
+                    await self.queue_manager.add_to_queue(
+                        url=url,
+                        message_id=message.id,
+                        channel_id=message.channel.id,
+                        guild_id=message.guild.id,
+                        author_id=message.author.id,
+                        priority=0
+                    )
+                    logger.info(f"Added video to queue: {url}")
+                except QueueError as e:
+                    logger.error(f"Failed to add video to queue: {str(e)}")
+                    await message.add_reaction(REACTIONS['error'])
+                    continue
+
+        except Exception as e:
+            logger.error(f"Error processing message: {traceback.format_exc()}")
+            try:
+                await message.add_reaction(REACTIONS['error'])
+            except:
+                pass
+
     async def _process_video(self, item) -> Tuple[bool, Optional[str]]:
         """Process a video from the queue"""
         if self._unloading: