Refactor code structure and remove redundant changes

This commit is contained in:
pacnpal
2025-08-26 13:19:04 -04:00
parent bf7e0c0f40
commit 831be6a2ee
151 changed files with 16260 additions and 9137 deletions

View File

@@ -15,7 +15,6 @@ from .data_structures import (
)
from apps.parks.models import ParkLocation, CompanyHeadquarters
from apps.rides.models import RideLocation
from apps.location.models import Location
class BaseLocationAdapter:
@@ -320,81 +319,8 @@ class CompanyLocationAdapter(BaseLocationAdapter):
return queryset.order_by("company__name")
class GenericLocationAdapter(BaseLocationAdapter):
    """Converts generic Location model to UnifiedLocation."""

    def to_unified_location(self, location: Location) -> Optional[UnifiedLocation]:
        """Convert generic Location to UnifiedLocation.

        Returns None when the location has neither a geometry point nor a
        usable latitude/longitude pair.
        """
        if not location.point and not (location.latitude and location.longitude):
            return None

        # Prefer the geometry point; fall back to the raw lat/lng fields.
        if location.point:
            coords = (location.point.y, location.point.x)
        else:
            coords = (float(location.latitude), float(location.longitude))

        content_type_name = (
            location.content_type.model if location.content_type else None
        )
        created = location.created_at.isoformat() if location.created_at else None
        updated = location.updated_at.isoformat() if location.updated_at else None

        return UnifiedLocation(
            id=f"generic_{location.id}",
            type=LocationType.GENERIC,
            name=location.name,
            coordinates=coords,
            address=location.get_formatted_address(),
            metadata={
                "location_type": location.location_type,
                "content_type": content_type_name,
                "object_id": location.object_id,
                "city": location.city,
                "state": location.state,
                "country": location.country,
            },
            type_data={
                "created_at": created,
                "updated_at": updated,
            },
            cluster_weight=1,
            cluster_category="generic",
        )

    def get_queryset(
        self,
        bounds: Optional[GeoBounds] = None,
        filters: Optional[MapFilters] = None,
    ) -> QuerySet:
        """Get optimized queryset for generic locations.

        Only rows with either a geometry point or a complete lat/lng pair are
        returned; optional bounds and attribute filters narrow the result.
        """
        qs = Location.objects.select_related("content_type").filter(
            models.Q(point__isnull=False)
            | models.Q(latitude__isnull=False, longitude__isnull=False)
        )

        # Spatial filtering: geometry within bounds OR lat/lng inside the box.
        if bounds:
            qs = qs.filter(
                models.Q(point__within=bounds.to_polygon())
                | models.Q(
                    latitude__gte=bounds.south,
                    latitude__lte=bounds.north,
                    longitude__gte=bounds.west,
                    longitude__lte=bounds.east,
                )
            )

        # Generic attribute filters.
        if filters:
            if filters.search_query:
                qs = qs.filter(name__icontains=filters.search_query)
            if filters.country:
                qs = qs.filter(country=filters.country)
            if filters.city:
                qs = qs.filter(city=filters.city)

        return qs.order_by("name")
# GenericLocationAdapter removed - generic location app is being deprecated
# All location functionality moved to domain-specific models (ParkLocation, RideLocation, etc.)
class LocationAbstractionLayer:
@@ -408,7 +334,7 @@ class LocationAbstractionLayer:
LocationType.PARK: ParkLocationAdapter(),
LocationType.RIDE: RideLocationAdapter(),
LocationType.COMPANY: CompanyLocationAdapter(),
LocationType.GENERIC: GenericLocationAdapter(),
# LocationType.GENERIC: Removed - generic location app deprecated
}
def get_all_locations(
@@ -464,10 +390,7 @@ class LocationAbstractionLayer:
obj = CompanyHeadquarters.objects.select_related("company").get(
company_id=location_id
)
elif location_type == LocationType.GENERIC:
obj = Location.objects.select_related("content_type").get(
id=location_id
)
# LocationType.GENERIC removed - generic location app deprecated
else:
return None

View File

@@ -0,0 +1,192 @@
"""
Shared media service for ThrillWiki.
This module provides shared functionality for media upload, storage, and processing
that can be used across all domain-specific media implementations.
"""
import logging
from typing import Any, Optional, Dict, Tuple
from datetime import datetime
from django.core.files.uploadedfile import UploadedFile
from django.conf import settings
from PIL import Image, ExifTags
import os
logger = logging.getLogger(__name__)
class MediaService:
    """Shared service for media upload and processing operations.

    All methods are stateless ``@staticmethod``s, so the service can be used
    without instantiation across domain-specific media implementations.
    """

    # EXIF tag IDs (see Pillow's ExifTags documentation).
    _EXIF_IFD_POINTER = 0x8769       # pointer to the Exif sub-IFD
    _DATETIME_ORIGINAL_TAG = 0x9003  # "DateTimeOriginal"

    @staticmethod
    def generate_upload_path(
        domain: str,
        identifier: str,
        filename: str,
        subdirectory: Optional[str] = None
    ) -> str:
        """
        Generate standardized upload path for media files.

        Args:
            domain: Domain type (e.g., 'park', 'ride')
            identifier: Object identifier (slug or id)
            filename: Original filename (currently unused; kept for interface
                compatibility — the extension is normalized to .jpg)
            subdirectory: Optional subdirectory for organization

        Returns:
            Standardized upload path
        """
        # Always use .jpg extension for consistency (process_image re-encodes
        # everything to JPEG).
        base_filename = f"{identifier}.jpg"
        if subdirectory:
            return f"{domain}/{subdirectory}/{identifier}/{base_filename}"
        return f"{domain}/{identifier}/{base_filename}"

    @staticmethod
    def extract_exif_date(image_file: UploadedFile) -> Optional[datetime]:
        """
        Extract the date taken from image EXIF data.

        Args:
            image_file: Uploaded image file

        Returns:
            DateTime when photo was taken, or None if not available
        """
        try:
            with Image.open(image_file) as img:
                exif = img.getexif()
                if exif:
                    # BUG FIX: DateTimeOriginal normally lives in the Exif
                    # sub-IFD, not in the base IFD returned by getexif(), so
                    # the previous base-IFD-only lookup almost never found it.
                    # Check the base IFD first, then the sub-IFD.
                    date_str = exif.get(MediaService._DATETIME_ORIGINAL_TAG)
                    if date_str is None:
                        try:
                            sub_ifd = exif.get_ifd(MediaService._EXIF_IFD_POINTER)
                            date_str = sub_ifd.get(
                                MediaService._DATETIME_ORIGINAL_TAG
                            )
                        except Exception:
                            date_str = None
                    if date_str:
                        # EXIF dates are typically '2024:02:15 14:30:00'
                        return datetime.strptime(date_str, "%Y:%m:%d %H:%M:%S")
            return None
        except Exception as e:
            logger.warning(f"Failed to extract EXIF date: {str(e)}")
            return None
        finally:
            # Reading consumes the stream; rewind so callers can reuse it.
            try:
                image_file.seek(0)
            except Exception:
                pass

    @staticmethod
    def validate_image_file(image_file: UploadedFile) -> Tuple[bool, Optional[str]]:
        """
        Validate uploaded image file.

        Checks size against MAX_PHOTO_SIZE, content type against
        ALLOWED_PHOTO_TYPES, and verifies the payload is a decodable image.

        Args:
            image_file: Uploaded image file

        Returns:
            Tuple of (is_valid, error_message)
        """
        try:
            # Check file size
            max_size = getattr(settings, 'MAX_PHOTO_SIZE',
                               10 * 1024 * 1024)  # 10MB default
            if image_file.size > max_size:
                return False, f"File size too large. Maximum size is {max_size // (1024 * 1024)}MB"
            # Check file type (browser-reported; PIL verify below is the
            # authoritative check)
            allowed_types = getattr(settings, 'ALLOWED_PHOTO_TYPES', [
                'image/jpeg', 'image/png', 'image/webp'])
            if image_file.content_type not in allowed_types:
                return False, f"File type not allowed. Allowed types: {', '.join(allowed_types)}"
            # Try to open with PIL to validate it's a real image
            with Image.open(image_file) as img:
                img.verify()
            return True, None
        except Exception as e:
            return False, f"Invalid image file: {str(e)}"
        finally:
            # BUG FIX: verify() leaves the stream at EOF (and invalidates the
            # parser); rewind so the same file can be processed afterwards.
            try:
                image_file.seek(0)
            except Exception:
                pass

    @staticmethod
    def process_image(
        image_file: UploadedFile,
        max_width: int = 1920,
        max_height: int = 1080,
        quality: int = 85
    ) -> UploadedFile:
        """
        Process and optimize image file.

        Converts to RGB, downsizes to fit within (max_width, max_height)
        preserving aspect ratio, and re-encodes as optimized JPEG. On any
        failure the original file is returned unchanged (best-effort).

        Args:
            image_file: Original uploaded file
            max_width: Maximum width for resizing
            max_height: Maximum height for resizing
            quality: JPEG quality (1-100)

        Returns:
            Processed image file
        """
        try:
            # BUG FIX: rewind before opening — a prior read (e.g. validation
            # or EXIF extraction) may have left the stream at EOF.
            image_file.seek(0)
            with Image.open(image_file) as img:
                # NOTE(review): EXIF orientation is not applied here
                # (ImageOps.exif_transpose) — rotated photos keep their raw
                # pixel orientation. Confirm whether that is intended.
                # Convert to RGB if necessary (JPEG has no alpha/palette)
                if img.mode in ('RGBA', 'LA', 'P'):
                    img = img.convert('RGB')
                # Resize if necessary (thumbnail preserves aspect ratio)
                if img.width > max_width or img.height > max_height:
                    img.thumbnail((max_width, max_height), Image.Resampling.LANCZOS)
                # Save processed image
                from io import BytesIO
                from django.core.files.uploadedfile import InMemoryUploadedFile
                output = BytesIO()
                img.save(output, format='JPEG', quality=quality, optimize=True)
                output.seek(0)
                return InMemoryUploadedFile(
                    output,
                    'ImageField',
                    f"{os.path.splitext(image_file.name)[0]}.jpg",
                    'image/jpeg',
                    output.getbuffer().nbytes,
                    None
                )
        except Exception as e:
            logger.warning(f"Failed to process image, using original: {str(e)}")
            # Rewind so the returned original is readable by the caller.
            try:
                image_file.seek(0)
            except Exception:
                pass
            return image_file

    @staticmethod
    def generate_default_caption(username: str) -> str:
        """
        Generate default caption for uploaded photos.

        Args:
            username: Username of uploader

        Returns:
            Default caption string
        """
        from django.utils import timezone
        current_time = timezone.now()
        return f"Uploaded by {username} on {current_time.strftime('%B %d, %Y at %I:%M %p')}"

    @staticmethod
    def get_storage_stats() -> Dict[str, Any]:
        """
        Get media storage statistics.

        Returns:
            Dictionary with storage statistics (currently placeholder values;
            implement against the configured storage backend).
        """
        try:
            # This would need to be implemented based on your storage backend
            return {
                "total_files": 0,
                "total_size_bytes": 0,
                "storage_backend": "default",
                "available_space": "unknown"
            }
        except Exception as e:
            logger.error(f"Failed to get storage stats: {str(e)}")
            return {"error": str(e)}