mirror of https://github.com/pacnpal/thrillwiki_django_no_react.git (synced 2025-12-22 02:11:10 -05:00)
@@ -1,27 +0,0 @@
"""
Core services for ThrillWiki unified map functionality.
"""

from .map_service import UnifiedMapService
from .clustering_service import ClusteringService
from .map_cache_service import MapCacheService
from .data_structures import (
    UnifiedLocation,
    LocationType,
    GeoBounds,
    MapFilters,
    MapResponse,
    ClusterData,
)

__all__ = [
    "UnifiedMapService",
    "ClusteringService",
    "MapCacheService",
    "UnifiedLocation",
    "LocationType",
    "GeoBounds",
    "MapFilters",
    "MapResponse",
    "ClusterData",
]
@@ -1,365 +0,0 @@
"""
Clustering service for map locations to improve performance and user experience.
"""

import math
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
from collections import defaultdict

from .data_structures import (
    UnifiedLocation,
    ClusterData,
    GeoBounds,
    LocationType,
)


@dataclass
class ClusterPoint:
    """Internal representation of a point for clustering."""

    location: UnifiedLocation
    x: float  # Projected x coordinate
    y: float  # Projected y coordinate


class ClusteringService:
    """
    Handles location clustering for map display using a simple distance-based approach
    with a zoom-level dependent clustering radius.
    """

    # Clustering configuration
    DEFAULT_RADIUS = 40  # pixels
    MIN_POINTS_TO_CLUSTER = 2
    MAX_ZOOM_FOR_CLUSTERING = 15
    MIN_ZOOM_FOR_CLUSTERING = 3

    # Zoom level configurations
    ZOOM_CONFIGS = {
        3: {"radius": 80, "min_points": 5},  # World level
        4: {"radius": 70, "min_points": 4},  # Continent level
        5: {"radius": 60, "min_points": 3},  # Country level
        6: {"radius": 50, "min_points": 3},  # Large region level
        7: {"radius": 45, "min_points": 2},  # Region level
        8: {"radius": 40, "min_points": 2},  # State level
        9: {"radius": 35, "min_points": 2},  # Metro area level
        10: {"radius": 30, "min_points": 2},  # City level
        11: {"radius": 25, "min_points": 2},  # District level
        12: {"radius": 20, "min_points": 2},  # Neighborhood level
        13: {"radius": 15, "min_points": 2},  # Block level
        14: {"radius": 10, "min_points": 2},  # Street level
        15: {"radius": 5, "min_points": 2},  # Building level
    }

    def __init__(self):
        self.cluster_id_counter = 0

    def should_cluster(self, zoom_level: int, point_count: int) -> bool:
        """Determine if clustering should be applied based on zoom level and point count."""
        if zoom_level > self.MAX_ZOOM_FOR_CLUSTERING:
            return False
        if zoom_level < self.MIN_ZOOM_FOR_CLUSTERING:
            return True

        config = self.ZOOM_CONFIGS.get(
            zoom_level, {"min_points": self.MIN_POINTS_TO_CLUSTER}
        )
        return point_count >= config["min_points"]

    def cluster_locations(
        self,
        locations: List[UnifiedLocation],
        zoom_level: int,
        bounds: Optional[GeoBounds] = None,
    ) -> tuple[List[UnifiedLocation], List[ClusterData]]:
        """
        Cluster locations based on zoom level and density.
        Returns (unclustered_locations, clusters).
        """
        if not locations or not self.should_cluster(zoom_level, len(locations)):
            return locations, []

        # Convert locations to projected coordinates for clustering
        cluster_points = self._project_locations(locations, bounds)

        # Get clustering configuration for zoom level
        config = self.ZOOM_CONFIGS.get(
            zoom_level,
            {
                "radius": self.DEFAULT_RADIUS,
                "min_points": self.MIN_POINTS_TO_CLUSTER,
            },
        )

        # Perform clustering
        clustered_groups = self._cluster_points(
            cluster_points, config["radius"], config["min_points"]
        )

        # Separate individual locations from clusters
        unclustered_locations = []
        clusters = []

        for group in clustered_groups:
            if len(group) < config["min_points"]:
                # Add individual locations
                unclustered_locations.extend([cp.location for cp in group])
            else:
                # Create cluster
                cluster = self._create_cluster(group)
                clusters.append(cluster)

        return unclustered_locations, clusters

    def _project_locations(
        self,
        locations: List[UnifiedLocation],
        bounds: Optional[GeoBounds] = None,
    ) -> List[ClusterPoint]:
        """Convert lat/lng coordinates to projected x/y for clustering calculations."""
        cluster_points = []

        # Use bounds or calculate from locations
        if not bounds:
            lats = [loc.latitude for loc in locations]
            lngs = [loc.longitude for loc in locations]
            bounds = GeoBounds(
                north=max(lats),
                south=min(lats),
                east=max(lngs),
                west=min(lngs),
            )

        # Simple equirectangular projection (good enough for clustering)
        center_lat = (bounds.north + bounds.south) / 2
        lat_scale = 111320  # meters per degree latitude
        lng_scale = 111320 * math.cos(
            math.radians(center_lat)
        )  # meters per degree longitude

        for location in locations:
            # Convert to meters relative to bounds center
            x = (location.longitude - (bounds.west + bounds.east) / 2) * lng_scale
            y = (location.latitude - (bounds.north + bounds.south) / 2) * lat_scale

            cluster_points.append(ClusterPoint(location=location, x=x, y=y))

        return cluster_points

    def _cluster_points(
        self, points: List[ClusterPoint], radius_pixels: int, min_points: int
    ) -> List[List[ClusterPoint]]:
        """
        Cluster points using a simple distance-based approach.
        Radius is given in pixels and converted to meters with a fixed approximation.
        """
        # Convert pixel radius to meters (rough approximation)
        # At zoom level 10, 1 pixel ≈ 150 meters
        radius_meters = radius_pixels * 150

        clustered = [False] * len(points)
        clusters = []

        for i, point in enumerate(points):
            if clustered[i]:
                continue

            # Find all points within radius
            cluster_group = [point]
            clustered[i] = True

            for j, other_point in enumerate(points):
                if i == j or clustered[j]:
                    continue

                distance = self._calculate_distance(point, other_point)
                if distance <= radius_meters:
                    cluster_group.append(other_point)
                    clustered[j] = True

            clusters.append(cluster_group)

        return clusters

    def _calculate_distance(self, point1: ClusterPoint, point2: ClusterPoint) -> float:
        """Calculate Euclidean distance between two projected points in meters."""
        dx = point1.x - point2.x
        dy = point1.y - point2.y
        return math.sqrt(dx * dx + dy * dy)

    def _create_cluster(self, cluster_points: List[ClusterPoint]) -> ClusterData:
        """Create a ClusterData object from a group of points."""
        locations = [cp.location for cp in cluster_points]

        # Calculate cluster center (average position)
        avg_lat = sum(loc.latitude for loc in locations) / len(locations)
        avg_lng = sum(loc.longitude for loc in locations) / len(locations)

        # Calculate cluster bounds
        lats = [loc.latitude for loc in locations]
        lngs = [loc.longitude for loc in locations]
        cluster_bounds = GeoBounds(
            north=max(lats), south=min(lats), east=max(lngs), west=min(lngs)
        )

        # Collect location types in cluster
        types = set(loc.type for loc in locations)

        # Select representative location (highest weight)
        representative = self._select_representative_location(locations)

        # Generate cluster ID
        self.cluster_id_counter += 1
        cluster_id = f"cluster_{self.cluster_id_counter}"

        return ClusterData(
            id=cluster_id,
            coordinates=[avg_lat, avg_lng],
            count=len(locations),
            types=types,
            bounds=cluster_bounds,
            representative_location=representative,
        )

    def _select_representative_location(
        self, locations: List[UnifiedLocation]
    ) -> Optional[UnifiedLocation]:
        """Select the most representative location for a cluster."""
        if not locations:
            return None

        # Prioritize by: 1) Parks over rides/companies, 2) Higher weight, 3) Better rating
        parks = [loc for loc in locations if loc.type == LocationType.PARK]
        if parks:
            return max(
                parks,
                key=lambda x: (
                    x.cluster_weight,
                    x.metadata.get("rating", 0) or 0,
                ),
            )

        rides = [loc for loc in locations if loc.type == LocationType.RIDE]
        if rides:
            return max(
                rides,
                key=lambda x: (
                    x.cluster_weight,
                    x.metadata.get("rating", 0) or 0,
                ),
            )

        companies = [loc for loc in locations if loc.type == LocationType.COMPANY]
        if companies:
            return max(companies, key=lambda x: x.cluster_weight)

        # Fall back to highest weight location
        return max(locations, key=lambda x: x.cluster_weight)

    def get_cluster_breakdown(self, clusters: List[ClusterData]) -> Dict[str, Any]:
        """Get statistics about clustering results."""
        if not clusters:
            return {
                "total_clusters": 0,
                "total_points_clustered": 0,
                "average_cluster_size": 0,
                "type_distribution": {},
                "category_distribution": {},
            }

        total_points = sum(cluster.count for cluster in clusters)
        type_counts = defaultdict(int)
        category_counts = defaultdict(int)

        for cluster in clusters:
            for location_type in cluster.types:
                type_counts[location_type.value] += cluster.count

            if cluster.representative_location:
                category_counts[cluster.representative_location.cluster_category] += 1

        return {
            "total_clusters": len(clusters),
            "total_points_clustered": total_points,
            "average_cluster_size": total_points / len(clusters),
            "largest_cluster_size": max(cluster.count for cluster in clusters),
            "smallest_cluster_size": min(cluster.count for cluster in clusters),
            "type_distribution": dict(type_counts),
            "category_distribution": dict(category_counts),
        }

    def expand_cluster(
        self, cluster: ClusterData, zoom_level: int
    ) -> List[UnifiedLocation]:
        """
        Expand a cluster to show individual locations (for drill-down functionality).
        This would typically require re-querying the database with the cluster bounds.
        """
        # This is a placeholder - in practice, this would re-query the database
        # with the cluster bounds and higher detail level
        return []
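

# Minimal usage sketch for ClusteringService (illustrative only). The two
# UnifiedLocation instances below use invented coordinates and weights, not real
# ThrillWiki data; they simply show the shape of the inputs and outputs.
def _example_cluster_two_nearby_parks() -> None:
    service = ClusteringService()
    locations = [
        UnifiedLocation(
            id="park_1",
            type=LocationType.PARK,
            name="Example Park A",
            coordinates=[41.0000, -81.0000],
            cluster_weight=3,
        ),
        UnifiedLocation(
            id="park_2",
            type=LocationType.PARK,
            name="Example Park B",
            coordinates=[41.0010, -81.0010],
            cluster_weight=2,
        ),
    ]
    singles, clusters = service.cluster_locations(locations, zoom_level=6)
    # At zoom 6 the config is {"radius": 50, "min_points": 3}, so these two points
    # stay unclustered; with more points (or a lower zoom) they would be merged
    # into ClusterData objects instead.
    print(len(singles), len(clusters))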


class SmartClusteringRules:
    """
    Advanced clustering rules that consider location types and importance.
    """

    @staticmethod
    def should_cluster_together(loc1: UnifiedLocation, loc2: UnifiedLocation) -> bool:
        """Determine if two locations should be clustered together."""

        # Rides in the same park should cluster together more readily
        if loc1.type == LocationType.RIDE and loc2.type == LocationType.RIDE:
            park1_id = loc1.metadata.get("park_id")
            park2_id = loc2.metadata.get("park_id")
            if park1_id and park2_id and park1_id == park2_id:
                return True

        # Major parks should resist clustering unless very close
        if (
            loc1.cluster_category == "major_park"
            or loc2.cluster_category == "major_park"
        ):
            return False

        # Similar types cluster more readily
        if loc1.type == loc2.type:
            return True

        # Different types can cluster, but only with a higher threshold
        return False

    @staticmethod
    def calculate_cluster_priority(
        locations: List[UnifiedLocation],
    ) -> UnifiedLocation:
        """Select the representative location for a cluster based on priority rules."""
        # Prioritize by: 1) Parks over rides, 2) Higher weight, 3) Better rating
        parks = [loc for loc in locations if loc.type == LocationType.PARK]
        if parks:
            return max(
                parks,
                key=lambda x: (
                    x.cluster_weight,
                    x.metadata.get("rating", 0) or 0,
                    x.metadata.get("ride_count", 0) or 0,
                ),
            )

        rides = [loc for loc in locations if loc.type == LocationType.RIDE]
        if rides:
            return max(
                rides,
                key=lambda x: (
                    x.cluster_weight,
                    x.metadata.get("rating", 0) or 0,
                ),
            )

        # Fall back to highest weight
        return max(locations, key=lambda x: x.cluster_weight)
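

# Sketch of a zoom-aware alternative to the fixed 150 m/pixel conversion used in
# ClusteringService._cluster_points. It uses the standard Web Mercator ground
# resolution (~156543 m/pixel at the equator for 256 px tiles at zoom 0). This is
# an illustration, not something the original module defines.
def meters_per_pixel(zoom_level: int, latitude: float = 0.0) -> float:
    """Approximate ground resolution in meters per pixel for a Web Mercator map."""
    return 156543.03392 * math.cos(math.radians(latitude)) / (2 ** zoom_level)


# For example, meters_per_pixel(10) is roughly 153 m at the equator, which is why
# the fixed 150 m/pixel constant above is described as a zoom-10 approximation.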
@@ -1,253 +0,0 @@
"""
Data structures for the unified map service.
"""

from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Set, Any
from django.contrib.gis.geos import Polygon


class LocationType(Enum):
    """Types of locations supported by the map service."""

    PARK = "park"
    RIDE = "ride"
    COMPANY = "company"
    GENERIC = "generic"


@dataclass
class GeoBounds:
    """Geographic boundary box for spatial queries."""

    north: float
    south: float
    east: float
    west: float

    def __post_init__(self):
        """Validate bounds after initialization."""
        if self.north < self.south:
            raise ValueError("North bound must be greater than south bound")
        if self.east < self.west:
            raise ValueError("East bound must be greater than west bound")
        if not (-90 <= self.south <= 90 and -90 <= self.north <= 90):
            raise ValueError("Latitude bounds must be between -90 and 90")
        if not (-180 <= self.west <= 180 and -180 <= self.east <= 180):
            raise ValueError("Longitude bounds must be between -180 and 180")

    def to_polygon(self) -> Polygon:
        """Convert bounds to PostGIS Polygon for database queries."""
        return Polygon.from_bbox((self.west, self.south, self.east, self.north))

    def expand(self, factor: float = 1.1) -> "GeoBounds":
        """Expand bounds by factor for buffer queries."""
        center_lat = (self.north + self.south) / 2
        center_lng = (self.east + self.west) / 2

        lat_range = (self.north - self.south) * factor / 2
        lng_range = (self.east - self.west) * factor / 2

        return GeoBounds(
            north=min(90, center_lat + lat_range),
            south=max(-90, center_lat - lat_range),
            east=min(180, center_lng + lng_range),
            west=max(-180, center_lng - lng_range),
        )

    def contains_point(self, lat: float, lng: float) -> bool:
        """Check if a point is within these bounds."""
        return self.south <= lat <= self.north and self.west <= lng <= self.east

    def to_dict(self) -> Dict[str, float]:
        """Convert to dictionary for JSON serialization."""
        return {
            "north": self.north,
            "south": self.south,
            "east": self.east,
            "west": self.west,
        }
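

# Minimal sketch of GeoBounds behaviour with invented coordinates: expand() grows the
# box around its centre and clamps to valid lat/lng ranges, and contains_point() is a
# simple inclusive range check.
def _example_geobounds() -> None:
    bounds = GeoBounds(north=42.0, south=40.0, east=-80.0, west=-84.0)
    wider = bounds.expand(1.5)       # latitude span grows from 2.0 to 3.0 degrees
    print(wider.north, wider.south)  # 42.5, 39.5
    print(bounds.contains_point(41.0, -82.0))  # True
    print(bounds.contains_point(41.0, -79.0))  # False (east of the box)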


@dataclass
class MapFilters:
    """Filtering options for map queries."""

    location_types: Optional[Set[LocationType]] = None
    park_status: Optional[Set[str]] = None  # OPERATING, CLOSED_TEMP, etc.
    ride_types: Optional[Set[str]] = None
    company_roles: Optional[Set[str]] = None  # OPERATOR, MANUFACTURER, etc.
    search_query: Optional[str] = None
    min_rating: Optional[float] = None
    has_coordinates: bool = True
    country: Optional[str] = None
    state: Optional[str] = None
    city: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for caching and serialization."""
        return {
            "location_types": (
                [t.value for t in self.location_types] if self.location_types else None
            ),
            "park_status": (list(self.park_status) if self.park_status else None),
            "ride_types": list(self.ride_types) if self.ride_types else None,
            "company_roles": (list(self.company_roles) if self.company_roles else None),
            "search_query": self.search_query,
            "min_rating": self.min_rating,
            "has_coordinates": self.has_coordinates,
            "country": self.country,
            "state": self.state,
            "city": self.city,
        }


@dataclass
class UnifiedLocation:
    """Unified location interface for all location types."""

    id: str  # Composite: f"{type}_{id}"
    type: LocationType
    name: str
    coordinates: List[float]  # [lat, lng]
    address: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)
    type_data: Dict[str, Any] = field(default_factory=dict)
    cluster_weight: int = 1
    cluster_category: str = "default"

    @property
    def latitude(self) -> float:
        """Get latitude from coordinates."""
        return self.coordinates[0]

    @property
    def longitude(self) -> float:
        """Get longitude from coordinates."""
        return self.coordinates[1]

    def to_geojson_feature(self) -> Dict[str, Any]:
        """Convert to GeoJSON feature for mapping libraries."""
        return {
            "type": "Feature",
            "properties": {
                "id": self.id,
                "type": self.type.value,
                "name": self.name,
                "address": self.address,
                "metadata": self.metadata,
                "type_data": self.type_data,
                "cluster_weight": self.cluster_weight,
                "cluster_category": self.cluster_category,
            },
            "geometry": {
                "type": "Point",
                # GeoJSON uses lng, lat
                "coordinates": [self.longitude, self.latitude],
            },
        }

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON responses."""
        return {
            "id": self.id,
            "type": self.type.value,
            "name": self.name,
            "coordinates": list(self.coordinates),
            "address": self.address,
            "metadata": self.metadata,
            "type_data": self.type_data,
            "cluster_weight": self.cluster_weight,
            "cluster_category": self.cluster_category,
        }
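

# Minimal sketch showing how a UnifiedLocation serialises for a mapping library.
# The id, name, and coordinates below are invented for illustration.
def _example_geojson_feature() -> Dict[str, Any]:
    location = UnifiedLocation(
        id="park_42",
        type=LocationType.PARK,
        name="Example Park",
        coordinates=[41.48, -82.68],
        metadata={"rating": 4.5},
    )
    feature = location.to_geojson_feature()
    # Note the axis-order flip: the dataclass stores [lat, lng], while the GeoJSON
    # geometry holds [lng, lat] as required by the spec.
    assert feature["geometry"]["coordinates"] == [-82.68, 41.48]
    return feature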


@dataclass
class ClusterData:
    """Represents a cluster of locations for map display."""

    id: str
    coordinates: List[float]  # [lat, lng]
    count: int
    types: Set[LocationType]
    bounds: GeoBounds
    representative_location: Optional[UnifiedLocation] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON responses."""
        return {
            "id": self.id,
            "coordinates": list(self.coordinates),
            "count": self.count,
            "types": [t.value for t in self.types],
            "bounds": self.bounds.to_dict(),
            "representative": (
                self.representative_location.to_dict()
                if self.representative_location
                else None
            ),
        }


@dataclass
class MapResponse:
    """Response structure for map API calls."""

    locations: List[UnifiedLocation] = field(default_factory=list)
    clusters: List[ClusterData] = field(default_factory=list)
    bounds: Optional[GeoBounds] = None
    total_count: int = 0
    filtered_count: int = 0
    zoom_level: Optional[int] = None
    clustered: bool = False
    cache_hit: bool = False
    query_time_ms: Optional[int] = None
    filters_applied: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON responses."""
        return {
            "status": "success",
            "data": {
                "locations": [loc.to_dict() for loc in self.locations],
                "clusters": [cluster.to_dict() for cluster in self.clusters],
                "bounds": self.bounds.to_dict() if self.bounds else None,
                "total_count": self.total_count,
                "filtered_count": self.filtered_count,
                "zoom_level": self.zoom_level,
                "clustered": self.clustered,
            },
            "meta": {
                "cache_hit": self.cache_hit,
                "query_time_ms": self.query_time_ms,
                "filters_applied": self.filters_applied,
                "pagination": {
                    "has_more": False,  # TODO: Implement pagination
                    "total_pages": 1,
                },
            },
        }


@dataclass
class QueryPerformanceMetrics:
    """Performance metrics for query optimization."""

    query_time_ms: int
    db_query_count: int
    cache_hit: bool
    result_count: int
    bounds_used: bool
    clustering_used: bool

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for logging."""
        return {
            "query_time_ms": self.query_time_ms,
            "db_query_count": self.db_query_count,
            "cache_hit": self.cache_hit,
            "result_count": self.result_count,
            "bounds_used": self.bounds_used,
            "clustering_used": self.clustering_used,
        }
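

# Minimal sketch of assembling a MapResponse the way a view might before returning
# JSON. The counts, timing, and filter names below are invented.
def _example_map_response() -> Dict[str, Any]:
    response = MapResponse(
        bounds=GeoBounds(north=42.0, south=40.0, east=-80.0, west=-84.0),
        total_count=120,
        filtered_count=37,
        zoom_level=8,
        clustered=True,
        cache_hit=False,
        query_time_ms=42,
        filters_applied=["park_status", "country"],
    )
    payload = response.to_dict()
    # payload["data"]["bounds"] round-trips the GeoBounds via to_dict(), and
    # payload["meta"]["filters_applied"] echoes the applied filter names.
    return payload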
@@ -1,320 +0,0 @@
"""
Enhanced caching service with multiple cache backends and strategies.
"""

from typing import Optional, Any, Dict, Callable
from django.core.cache import caches
import hashlib
import json
import logging
import time
from functools import wraps

logger = logging.getLogger(__name__)


# Define GeoBounds for type hinting (a lightweight stand-in with min/max attributes,
# distinct from the GeoBounds dataclass in data_structures)
class GeoBounds:
    def __init__(self, min_lat: float, min_lng: float, max_lat: float, max_lng: float):
        self.min_lat = min_lat
        self.min_lng = min_lng
        self.max_lat = max_lat
        self.max_lng = max_lng


class EnhancedCacheService:
    """Comprehensive caching service with multiple cache backends"""

    def __init__(self):
        self.default_cache = caches["default"]
        try:
            self.api_cache = caches["api"]
        except Exception:
            # Fallback to default cache if api cache not configured
            self.api_cache = self.default_cache

    # L1: Query-level caching
    def cache_queryset(
        self,
        cache_key: str,
        queryset_func: Callable,
        timeout: int = 3600,
        **kwargs,
    ) -> Any:
        """Cache expensive querysets"""
        cached_result = self.default_cache.get(cache_key)
        if cached_result is None:
            start_time = time.time()
            result = queryset_func(**kwargs)
            duration = time.time() - start_time

            # Log cache miss and function execution time
            logger.info(
                f"Cache miss for key '{cache_key}', executed in {duration:.3f}s",
                extra={"cache_key": cache_key, "execution_time": duration},
            )

            self.default_cache.set(cache_key, result, timeout)
            return result

        logger.debug(f"Cache hit for key '{cache_key}'")
        return cached_result

    # L2: API response caching
    def cache_api_response(
        self,
        view_name: str,
        params: Dict,
        response_data: Any,
        timeout: int = 1800,
    ):
        """Cache API responses based on view and parameters"""
        cache_key = self._generate_api_cache_key(view_name, params)
        self.api_cache.set(cache_key, response_data, timeout)
        logger.debug(f"Cached API response for view '{view_name}'")

    def get_cached_api_response(self, view_name: str, params: Dict) -> Optional[Any]:
        """Retrieve cached API response"""
        cache_key = self._generate_api_cache_key(view_name, params)
        result = self.api_cache.get(cache_key)

        if result:
            logger.debug(f"Cache hit for API view '{view_name}'")
        else:
            logger.debug(f"Cache miss for API view '{view_name}'")

        return result

    # L3: Geographic caching (building on existing MapCacheService)
    def cache_geographic_data(
        self,
        bounds: "GeoBounds",
        data: Any,
        zoom_level: int,
        timeout: int = 1800,
    ):
        """Cache geographic data with spatial keys"""
        # Generate spatial cache key based on bounds and zoom level
        cache_key = (
            f"geo:{bounds.min_lat}:{bounds.min_lng}:"
            f"{bounds.max_lat}:{bounds.max_lng}:z{zoom_level}"
        )
        self.default_cache.set(cache_key, data, timeout)
        logger.debug(f"Cached geographic data for bounds {bounds}")

    def get_cached_geographic_data(
        self, bounds: "GeoBounds", zoom_level: int
    ) -> Optional[Any]:
        """Retrieve cached geographic data"""
        cache_key = (
            f"geo:{bounds.min_lat}:{bounds.min_lng}:"
            f"{bounds.max_lat}:{bounds.max_lng}:z{zoom_level}"
        )
        return self.default_cache.get(cache_key)

    # Cache invalidation utilities
    def invalidate_pattern(self, pattern: str):
        """Invalidate cache keys matching a pattern (if backend supports it)"""
        try:
            # For Redis cache backends
            if hasattr(self.default_cache, "delete_pattern"):
                deleted_count = self.default_cache.delete_pattern(pattern)
                logger.info(
                    f"Invalidated {deleted_count} cache keys matching pattern '{pattern}'"
                )
                return deleted_count
            else:
                logger.warning(
                    f"Cache backend does not support pattern deletion for pattern '{pattern}'"
                )
        except Exception as e:
            logger.error(f"Error invalidating cache pattern '{pattern}': {e}")

    def invalidate_model_cache(
        self, model_name: str, instance_id: Optional[int] = None
    ):
        """Invalidate cache keys related to a specific model"""
        if instance_id:
            pattern = f"*{model_name}:{instance_id}*"
        else:
            pattern = f"*{model_name}*"

        self.invalidate_pattern(pattern)

    # Cache warming utilities
    def warm_cache(
        self,
        cache_key: str,
        warm_func: Callable,
        timeout: int = 3600,
        **kwargs,
    ):
        """Proactively warm cache with data"""
        try:
            data = warm_func(**kwargs)
            self.default_cache.set(cache_key, data, timeout)
            logger.info(f"Warmed cache for key '{cache_key}'")
        except Exception as e:
            logger.error(f"Error warming cache for key '{cache_key}': {e}")

    def _generate_api_cache_key(self, view_name: str, params: Dict) -> str:
        """Generate consistent cache keys for API responses"""
        # Sort params to ensure consistent key generation
        params_str = json.dumps(params, sort_keys=True, default=str)
        params_hash = hashlib.md5(params_str.encode()).hexdigest()
        return f"api:{view_name}:{params_hash}"
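

# Minimal usage sketch for EnhancedCacheService (illustrative only). It assumes a
# configured Django cache (settings.CACHES); the key names and the expensive_query
# callable are invented placeholders.
def _example_cache_usage() -> None:
    service = EnhancedCacheService()

    def expensive_query(limit: int = 10):
        # Stand-in for an ORM call; returns cheap dummy data here
        return list(range(limit))

    # First call executes expensive_query and stores the result; repeat calls with
    # the same key return the cached value until the timeout expires.
    top_items = service.cache_queryset(
        "example:top_items", expensive_query, timeout=600, limit=10
    )

    # API-level caching keys on the view name plus an MD5 hash of the sorted params.
    params = {"zoom": 8, "country": "US"}
    service.cache_api_response("map_locations", params, {"items": top_items})
    print(service.get_cached_api_response("map_locations", params))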


# Cache decorators
def cache_api_response(timeout=1800, vary_on=None, key_prefix=""):
    """Decorator for caching API responses"""

    def decorator(view_func):
        @wraps(view_func)
        def wrapper(self, request, *args, **kwargs):
            if request.method != "GET":
                return view_func(self, request, *args, **kwargs)

            # Generate cache key based on view, user, and parameters
            cache_key_parts = [
                key_prefix or view_func.__name__,
                (
                    str(request.user.id)
                    if request.user.is_authenticated
                    else "anonymous"
                ),
                str(hash(frozenset(request.GET.items()))),
            ]

            if vary_on:
                for field in vary_on:
                    cache_key_parts.append(str(getattr(request, field, "")))

            cache_key = ":".join(cache_key_parts)

            # Try to get from cache
            cache_service = EnhancedCacheService()
            cached_response = cache_service.api_cache.get(cache_key)
            if cached_response:
                logger.debug(f"Cache hit for API view {view_func.__name__}")
                return cached_response

            # Execute view and cache result
            response = view_func(self, request, *args, **kwargs)
            if hasattr(response, "status_code") and response.status_code == 200:
                cache_service.api_cache.set(cache_key, response, timeout)
                logger.debug(f"Cached API response for view {view_func.__name__}")

            return response

        return wrapper

    return decorator


def cache_queryset_result(cache_key_template: str, timeout: int = 3600):
    """Decorator for caching queryset results"""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Generate cache key from template and arguments
            cache_key = cache_key_template.format(*args, **kwargs)

            cache_service = EnhancedCacheService()
            # cache_queryset invokes func(**kwargs); positional arguments are only
            # used for key generation here and are not forwarded to func.
            return cache_service.cache_queryset(cache_key, func, timeout, **kwargs)

        return wrapper

    return decorator


# Context manager for cache warming
class CacheWarmer:
    """Context manager for batch cache warming operations"""

    def __init__(self):
        self.cache_service = EnhancedCacheService()
        self.warm_operations = []

    def add(
        self,
        cache_key: str,
        warm_func: Callable,
        timeout: int = 3600,
        **kwargs,
    ):
        """Add a cache warming operation to the batch"""
        self.warm_operations.append(
            {
                "cache_key": cache_key,
                "warm_func": warm_func,
                "timeout": timeout,
                "kwargs": kwargs,
            }
        )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Execute all cache warming operations"""
        logger.info(f"Warming {len(self.warm_operations)} cache entries")

        for operation in self.warm_operations:
            try:
                self.cache_service.warm_cache(
                    operation["cache_key"],
                    operation["warm_func"],
                    operation["timeout"],
                    **operation["kwargs"],
                )
            except Exception as e:
                logger.error(f"Error warming cache for {operation['cache_key']}: {e}")


# Cache statistics and monitoring
class CacheMonitor:
    """Monitor cache performance and statistics"""

    def __init__(self):
        self.cache_service = EnhancedCacheService()

    def get_cache_stats(self) -> Dict[str, Any]:
        """Get cache statistics if available"""
        stats = {}

        try:
            # Try to get Redis cache stats
            cache_backend = self.cache_service.default_cache.__class__.__name__

            if "Redis" in cache_backend:
                # Attempt to get Redis client and stats
                redis_client = self.cache_service.default_cache._cache.get_client()
                info = redis_client.info()
                stats["redis"] = {
                    "used_memory": info.get("used_memory_human"),
                    "connected_clients": info.get("connected_clients"),
                    "total_commands_processed": info.get("total_commands_processed"),
                    "keyspace_hits": info.get("keyspace_hits"),
                    "keyspace_misses": info.get("keyspace_misses"),
                }

                # Calculate hit rate
                hits = info.get("keyspace_hits", 0)
                misses = info.get("keyspace_misses", 0)
                if hits + misses > 0:
                    stats["redis"]["hit_rate"] = hits / (hits + misses) * 100
            else:
                # For local memory cache or other backends
                stats["cache_backend"] = cache_backend
                stats["message"] = f"Cache statistics not available for {cache_backend}"

        except Exception:
            # Don't log as error since this is expected for non-Redis backends
            cache_backend = self.cache_service.default_cache.__class__.__name__
            stats["cache_backend"] = cache_backend
            stats["message"] = f"Cache statistics not available for {cache_backend}"

        return stats

    def log_cache_performance(self):
        """Log cache performance metrics"""
        stats = self.get_cache_stats()
        if stats:
            logger.info("Cache performance statistics", extra=stats)
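

# Minimal sketch of batch cache warming with the CacheWarmer context manager. The
# loader functions are placeholders; in the project they would run real querysets.
def _example_warm_caches() -> None:
    def load_featured_parks():
        return ["example-park-a", "example-park-b"]

    def load_recent_reviews(limit: int = 20):
        return list(range(limit))

    with CacheWarmer() as warmer:
        warmer.add("example:featured_parks", load_featured_parks, timeout=3600)
        warmer.add("example:recent_reviews", load_recent_reviews, timeout=900, limit=20)
    # On exit, each queued entry runs through EnhancedCacheService.warm_cache and any
    # failure is logged without aborting the remaining operations.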
@@ -1,415 +0,0 @@
"""
Entity Fuzzy Matching Service for ThrillWiki

Provides intelligent entity matching when exact lookups fail, with authentication
prompts for suggesting new entity creation.

Features:
- Levenshtein distance for typo correction
- Phonetic matching using Soundex algorithm
- Partial name matching
- Priority-based scoring (parks > rides > companies)
- Authentication state-aware suggestions
"""

import re
from difflib import SequenceMatcher
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
from enum import Enum

from django.db.models import Q

from apps.parks.models import Park
from apps.rides.models import Ride
from apps.parks.models import Company


class EntityType(Enum):
    """Supported entity types for fuzzy matching."""

    PARK = "park"
    RIDE = "ride"
    COMPANY = "company"


@dataclass
class FuzzyMatchResult:
    """Result of a fuzzy matching operation."""

    entity_type: EntityType
    entity: Any  # The actual model instance
    name: str
    slug: str
    score: float  # 0.0 to 1.0, higher is better match
    match_reason: str  # Description of why this was matched
    confidence: str  # 'high', 'medium', 'low'
    url: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for API responses."""
        return {
            "entity_type": self.entity_type.value,
            "name": self.name,
            "slug": self.slug,
            "score": round(self.score, 3),
            "match_reason": self.match_reason,
            "confidence": self.confidence,
            "url": self.url,
            "entity_id": getattr(self.entity, "id", None),
        }


@dataclass
class EntitySuggestion:
    """Suggestion for creating a new entity when no matches are found."""

    suggested_name: str
    entity_type: EntityType
    requires_authentication: bool
    login_prompt: str
    signup_prompt: str
    creation_hint: str


class FuzzyMatchingAlgorithms:
    """Collection of fuzzy matching algorithms."""

    @staticmethod
    def levenshtein_distance(s1: str, s2: str) -> int:
        """Calculate Levenshtein distance between two strings."""
        if len(s1) < len(s2):
            return FuzzyMatchingAlgorithms.levenshtein_distance(s2, s1)

        if len(s2) == 0:
            return len(s1)

        previous_row = list(range(len(s2) + 1))
        for i, c1 in enumerate(s1):
            current_row = [i + 1]
            for j, c2 in enumerate(s2):
                insertions = previous_row[j + 1] + 1
                deletions = current_row[j] + 1
                substitutions = previous_row[j] + (c1 != c2)
                current_row.append(min(insertions, deletions, substitutions))
            previous_row = current_row

        return previous_row[-1]

    @staticmethod
    def similarity_ratio(s1: str, s2: str) -> float:
        """Calculate similarity ratio (0.0 to 1.0) using SequenceMatcher."""
        return SequenceMatcher(None, s1.lower(), s2.lower()).ratio()

    @staticmethod
    def soundex(name: str) -> str:
        """Generate Soundex code for phonetic matching."""
        name = re.sub(r"[^A-Za-z]", "", name.upper())
        if not name:
            return "0000"

        # Soundex algorithm
        soundex_map = {
            "BFPV": "1",
            "CGJKQSXZ": "2",
            "DT": "3",
            "L": "4",
            "MN": "5",
            "R": "6",
            # Vowels and H/W/Y map to 0 so the zero-removal step below drops them
            "AEIOUYHW": "0",
        }

        first_letter = name[0]
        name = name[1:]

        # Replace letters with numbers
        for letters, number in soundex_map.items():
            name = re.sub(f"[{letters}]", number, name)

        # Remove consecutive duplicates
        name = re.sub(r"(\d)\1+", r"\1", name)

        # Remove zeros
        name = re.sub("0", "", name)

        # Pad or truncate to 4 characters
        soundex_code = (first_letter + name + "000")[:4]
        return soundex_code

    @staticmethod
    def partial_match_score(query: str, target: str) -> float:
        """Calculate partial matching score for substring matches."""
        query_lower = query.lower()
        target_lower = target.lower()

        # Exact match
        if query_lower == target_lower:
            return 1.0

        # Starts with
        if target_lower.startswith(query_lower):
            return 0.8 + (len(query) / len(target)) * 0.15

        # Contains
        if query_lower in target_lower:
            return 0.6 + (len(query) / len(target)) * 0.2

        # Words match
        query_words = set(query_lower.split())
        target_words = set(target_lower.split())
        if query_words & target_words:
            intersection = len(query_words & target_words)
            union = len(query_words | target_words)
            return 0.4 + (intersection / union) * 0.3

        return 0.0
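

# Minimal sketch of the scoring primitives on their own, without touching the
# database. The strings are illustrative; values are printed rather than asserted
# because the exact scores depend on the algorithms above.
def _example_fuzzy_scores() -> None:
    algorithms = FuzzyMatchingAlgorithms()
    print(algorithms.levenshtein_distance("cedar", "ceder"))       # small edit distance
    print(algorithms.similarity_ratio("Cedar Pointe", "Cedar Point"))
    print(algorithms.partial_match_score("cedar", "Cedar Point"))  # prefix-match branch
    print(algorithms.soundex("Cedar") == algorithms.soundex("Ceder"))  # phonetic match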


class EntityFuzzyMatcher:
    """Main fuzzy matching service for entities."""

    # Matching thresholds
    HIGH_CONFIDENCE_THRESHOLD = 0.8
    MEDIUM_CONFIDENCE_THRESHOLD = 0.6
    LOW_CONFIDENCE_THRESHOLD = 0.4

    # Maximum results to consider
    MAX_CANDIDATES = 50
    MAX_RESULTS = 5

    def __init__(self):
        self.algorithms = FuzzyMatchingAlgorithms()

    def find_entity(
        self, query: str, entity_types: Optional[List[EntityType]] = None, user=None
    ) -> tuple[List[FuzzyMatchResult], Optional[EntitySuggestion]]:
        """
        Find entities matching the query with fuzzy matching.

        Args:
            query: Search query string
            entity_types: Limit search to specific entity types
            user: Current user for authentication context

        Returns:
            Tuple of (matches, suggestion_for_new_entity)
        """
        if not query or len(query.strip()) < 2:
            return [], None

        query = query.strip()
        entity_types = entity_types or [
            EntityType.PARK,
            EntityType.RIDE,
            EntityType.COMPANY,
        ]

        # Collect all potential matches
        candidates = []

        for entity_type in entity_types:
            candidates.extend(self._get_candidates(query, entity_type))

        # Score and rank candidates
        matches = self._score_and_rank_candidates(query, candidates)

        # Generate suggestion if no good matches found
        suggestion = None
        if not matches or matches[0].score < self.LOW_CONFIDENCE_THRESHOLD:
            suggestion = self._generate_entity_suggestion(query, entity_types, user)

        return matches[: self.MAX_RESULTS], suggestion

    def _get_candidates(
        self, query: str, entity_type: EntityType
    ) -> List[Dict[str, Any]]:
        """Get potential matching candidates for an entity type."""
        candidates = []

        if entity_type == EntityType.PARK:
            parks = Park.objects.filter(
                Q(name__icontains=query)
                | Q(slug__icontains=query.lower().replace(" ", "-"))
                | Q(former_names__icontains=query)
            )[: self.MAX_CANDIDATES]

            for park in parks:
                candidates.append(
                    {
                        "entity_type": EntityType.PARK,
                        "entity": park,
                        "name": park.name,
                        "slug": park.slug,
                        "search_names": [park.name],
                        "url": getattr(park, "get_absolute_url", lambda: None)(),
                        "priority_boost": 0.1,  # Parks get priority
                    }
                )

        elif entity_type == EntityType.RIDE:
            rides = Ride.objects.select_related("park").filter(
                Q(name__icontains=query)
                | Q(slug__icontains=query.lower().replace(" ", "-"))
                | Q(former_names__icontains=query)
                | Q(park__name__icontains=query)
            )[: self.MAX_CANDIDATES]

            for ride in rides:
                candidates.append(
                    {
                        "entity_type": EntityType.RIDE,
                        "entity": ride,
                        "name": ride.name,
                        "slug": ride.slug,
                        "search_names": [ride.name, f"{ride.park.name} {ride.name}"],
                        "url": getattr(ride, "get_absolute_url", lambda: None)(),
                        "priority_boost": 0.05,  # Rides get some priority
                    }
                )

        elif entity_type == EntityType.COMPANY:
            companies = Company.objects.filter(
                Q(name__icontains=query)
                | Q(slug__icontains=query.lower().replace(" ", "-"))
            )[: self.MAX_CANDIDATES]

            for company in companies:
                candidates.append(
                    {
                        "entity_type": EntityType.COMPANY,
                        "entity": company,
                        "name": company.name,
                        "slug": company.slug,
                        "search_names": [company.name],
                        "url": getattr(company, "get_absolute_url", lambda: None)(),
                        "priority_boost": 0.0,  # Companies get no priority boost
                    }
                )

        return candidates

    def _score_and_rank_candidates(
        self, query: str, candidates: List[Dict[str, Any]]
    ) -> List[FuzzyMatchResult]:
        """Score and rank all candidates using multiple algorithms."""
        scored_matches = []

        for candidate in candidates:
            best_score = 0.0
            best_reason = ""

            # Test against all search names for this candidate
            for search_name in candidate["search_names"]:
                # Algorithm 1: Sequence similarity
                similarity_score = self.algorithms.similarity_ratio(query, search_name)
                if similarity_score > best_score:
                    best_score = similarity_score
                    best_reason = f"Text similarity with '{search_name}'"

                # Algorithm 2: Partial matching
                partial_score = self.algorithms.partial_match_score(query, search_name)
                if partial_score > best_score:
                    best_score = partial_score
                    best_reason = f"Partial match with '{search_name}'"

                # Algorithm 3: Levenshtein distance
                if len(query) > 3 and len(search_name) > 3:
                    max_len = max(len(query), len(search_name))
                    distance = self.algorithms.levenshtein_distance(query, search_name)
                    lev_score = 1.0 - (distance / max_len)
                    if lev_score > best_score:
                        best_score = lev_score
                        best_reason = f"Similar spelling to '{search_name}'"

                # Algorithm 4: Soundex phonetic matching
                if len(query) > 2 and len(search_name) > 2:
                    query_soundex = self.algorithms.soundex(query)
                    name_soundex = self.algorithms.soundex(search_name)
                    if query_soundex == name_soundex and best_score < 0.7:
                        best_score = max(best_score, 0.7)
                        best_reason = f"Sounds like '{search_name}'"

            # Apply priority boost
            best_score += candidate["priority_boost"]
            best_score = min(1.0, best_score)  # Cap at 1.0

            # Determine confidence level
            if best_score >= self.HIGH_CONFIDENCE_THRESHOLD:
                confidence = "high"
            elif best_score >= self.MEDIUM_CONFIDENCE_THRESHOLD:
                confidence = "medium"
            else:
                confidence = "low"

            # Only include if above minimum threshold
            if best_score >= self.LOW_CONFIDENCE_THRESHOLD:
                match = FuzzyMatchResult(
                    entity_type=candidate["entity_type"],
                    entity=candidate["entity"],
                    name=candidate["name"],
                    slug=candidate["slug"],
                    score=best_score,
                    match_reason=best_reason,
                    confidence=confidence,
                    url=candidate["url"],
                )
                scored_matches.append(match)

        # Sort by score (highest first) and return
        return sorted(scored_matches, key=lambda x: x.score, reverse=True)

    def _generate_entity_suggestion(
        self, query: str, entity_types: List[EntityType], user
    ) -> EntitySuggestion:
        """Generate suggestion for creating new entity when no matches found."""

        # Determine most likely entity type based on query characteristics
        suggested_type = EntityType.PARK  # Default to park

        # Simple heuristics for entity type detection
        query_lower = query.lower()
        if any(
            word in query_lower
            for word in ["roller coaster", "ride", "coaster", "attraction"]
        ):
            suggested_type = EntityType.RIDE
        elif any(
            word in query_lower for word in ["inc", "corp", "company", "manufacturer"]
        ):
            suggested_type = EntityType.COMPANY
        elif EntityType.PARK in entity_types:
            suggested_type = EntityType.PARK
        elif entity_types:
            suggested_type = entity_types[0]

        # Clean up the suggested name
        suggested_name = " ".join(word.capitalize() for word in query.split())

        # Check if user is authenticated
        is_authenticated = (
            user and hasattr(user, "is_authenticated") and user.is_authenticated
        )

        # Generate appropriate prompts
        entity_name = suggested_type.value
        login_prompt = (
            f"Log in to suggest adding '{suggested_name}' as a new {entity_name}"
        )
        signup_prompt = (
            f"Sign up to contribute and add '{suggested_name}' to ThrillWiki"
        )
        creation_hint = (
            f"Help expand ThrillWiki by adding information about '{suggested_name}'"
        )

        return EntitySuggestion(
            suggested_name=suggested_name,
            entity_type=suggested_type,
            requires_authentication=not is_authenticated,
            login_prompt=login_prompt,
            signup_prompt=signup_prompt,
            creation_hint=creation_hint,
        )


# Global service instance
entity_fuzzy_matcher = EntityFuzzyMatcher()
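

# Minimal usage sketch for the module-level matcher (illustrative only). This only
# works inside a configured Django project, since the candidate lookups hit the
# Park/Ride/Company querysets; the query string and the absent user are invented.
def _example_find_entity() -> None:
    matches, suggestion = entity_fuzzy_matcher.find_entity("cedar pont", user=None)
    for match in matches:
        print(match.confidence, match.score, match.name, "-", match.match_reason)
    if suggestion:
        # No confident match: the caller can surface the login/signup prompts to
        # invite the (unauthenticated) user to create the entity.
        print(suggestion.login_prompt)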
@@ -1,409 +0,0 @@
"""
Location adapters for converting between domain-specific models and UnifiedLocation.
"""

from typing import List, Optional
from django.db.models import QuerySet
from django.urls import reverse

from .data_structures import (
    UnifiedLocation,
    LocationType,
    GeoBounds,
    MapFilters,
)
from apps.parks.models import ParkLocation, CompanyHeadquarters
from apps.rides.models import RideLocation


class BaseLocationAdapter:
    """Base adapter class for location conversions."""

    def to_unified_location(self, location_obj) -> Optional[UnifiedLocation]:
        """Convert model instance to UnifiedLocation."""
        raise NotImplementedError

    def get_queryset(
        self,
        bounds: Optional[GeoBounds] = None,
        filters: Optional[MapFilters] = None,
    ) -> QuerySet:
        """Get optimized queryset for this location type."""
        raise NotImplementedError

    def bulk_convert(self, queryset: QuerySet) -> List[UnifiedLocation]:
        """Convert multiple location objects efficiently."""
        unified_locations = []
        for obj in queryset:
            unified_loc = self.to_unified_location(obj)
            if unified_loc:
                unified_locations.append(unified_loc)
        return unified_locations


class ParkLocationAdapter(BaseLocationAdapter):
    """Converts Park/ParkLocation to UnifiedLocation."""

    def to_unified_location(
        self, location_obj: ParkLocation
    ) -> Optional[UnifiedLocation]:
        """Convert ParkLocation to UnifiedLocation."""
        if (
            not location_obj.point
            or location_obj.latitude is None
            or location_obj.longitude is None
        ):
            return None

        park = location_obj.park

        return UnifiedLocation(
            id=f"park_{park.id}",
            type=LocationType.PARK,
            name=park.name,
            coordinates=[float(location_obj.latitude), float(location_obj.longitude)],
            address=location_obj.formatted_address,
            metadata={
                "status": getattr(park, "status", "UNKNOWN"),
                "rating": (
                    float(park.average_rating)
                    if hasattr(park, "average_rating") and park.average_rating
                    else None
                ),
                "ride_count": getattr(park, "ride_count", 0),
                "coaster_count": getattr(park, "coaster_count", 0),
                "operator": (
                    park.operator.name
                    if hasattr(park, "operator") and park.operator
                    else None
                ),
                "city": location_obj.city,
                "state": location_obj.state,
                "country": location_obj.country,
            },
            type_data={
                "slug": park.slug,
                "opening_date": (
                    park.opening_date.isoformat()
                    if hasattr(park, "opening_date") and park.opening_date
                    else None
                ),
                "website": getattr(park, "website", ""),
                "operating_season": getattr(park, "operating_season", ""),
                "highway_exit": location_obj.highway_exit,
                "parking_notes": location_obj.parking_notes,
                "best_arrival_time": (
                    location_obj.best_arrival_time.strftime("%H:%M")
                    if location_obj.best_arrival_time
                    else None
                ),
                "seasonal_notes": location_obj.seasonal_notes,
                "url": self._get_park_url(park),
            },
            cluster_weight=self._calculate_park_weight(park),
            cluster_category=self._get_park_category(park),
        )

    def get_queryset(
        self,
        bounds: Optional[GeoBounds] = None,
        filters: Optional[MapFilters] = None,
    ) -> QuerySet:
        """Get optimized queryset for park locations."""
        queryset = ParkLocation.objects.select_related("park", "park__operator").filter(
            point__isnull=False
        )

        # Spatial filtering
        if bounds:
            queryset = queryset.filter(point__within=bounds.to_polygon())

        # Park-specific filters
        if filters:
            if filters.park_status:
                queryset = queryset.filter(park__status__in=filters.park_status)
            if filters.search_query:
                queryset = queryset.filter(park__name__icontains=filters.search_query)
            if filters.country:
                queryset = queryset.filter(country=filters.country)
            if filters.state:
                queryset = queryset.filter(state=filters.state)
            if filters.city:
                queryset = queryset.filter(city=filters.city)

        return queryset.order_by("park__name")

    def _calculate_park_weight(self, park) -> int:
        """Calculate clustering weight based on park importance."""
        weight = 1
        if hasattr(park, "ride_count") and park.ride_count and park.ride_count > 20:
            weight += 2
        if (
            hasattr(park, "coaster_count")
            and park.coaster_count
            and park.coaster_count > 5
        ):
            weight += 1
        if (
            hasattr(park, "average_rating")
            and park.average_rating
            and park.average_rating > 4.0
        ):
            weight += 1
        return min(weight, 5)  # Cap at 5

    def _get_park_category(self, park) -> str:
        """Determine park category for clustering."""
        coaster_count = getattr(park, "coaster_count", 0) or 0
        ride_count = getattr(park, "ride_count", 0) or 0

        if coaster_count >= 10:
            return "major_park"
        elif ride_count >= 15:
            return "theme_park"
        else:
            return "small_park"

    def _get_park_url(self, park) -> str:
        """Get URL for park detail page."""
        try:
            return reverse("parks:detail", kwargs={"slug": park.slug})
        except BaseException:
            return f"/parks/{park.slug}/"
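

# Minimal sketch of the weighting/category helpers in isolation (illustrative only).
# A SimpleNamespace stands in for a Park instance; the attribute values are invented.
def _example_park_weighting() -> None:
    from types import SimpleNamespace

    adapter = ParkLocationAdapter()
    big_park = SimpleNamespace(ride_count=45, coaster_count=12, average_rating=4.4)
    small_park = SimpleNamespace(ride_count=8, coaster_count=1, average_rating=3.9)

    print(adapter._calculate_park_weight(big_park))  # 1 + 2 + 1 + 1, capped at 5
    print(adapter._get_park_category(big_park))      # "major_park" (>= 10 coasters)
    print(adapter._get_park_category(small_park))    # "small_park"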


class RideLocationAdapter(BaseLocationAdapter):
    """Converts Ride/RideLocation to UnifiedLocation."""

    def to_unified_location(
        self, location_obj: RideLocation
    ) -> Optional[UnifiedLocation]:
        """Convert RideLocation to UnifiedLocation."""
        if (
            not location_obj.point
            or location_obj.latitude is None
            or location_obj.longitude is None
        ):
            return None

        ride = location_obj.ride

        return UnifiedLocation(
            id=f"ride_{ride.id}",
            type=LocationType.RIDE,
            name=ride.name,
            coordinates=[float(location_obj.latitude), float(location_obj.longitude)],
            address=(
                f"{location_obj.park_area}, {ride.park.name}"
                if location_obj.park_area
                else ride.park.name
            ),
            metadata={
                "park_id": ride.park.id,
                "park_name": ride.park.name,
                "park_area": location_obj.park_area,
                "ride_type": getattr(ride, "ride_type", "Unknown"),
                "status": getattr(ride, "status", "UNKNOWN"),
                "rating": (
                    float(ride.average_rating)
                    if hasattr(ride, "average_rating") and ride.average_rating
                    else None
                ),
                "manufacturer": (
                    getattr(ride, "manufacturer", {}).get("name")
                    if hasattr(ride, "manufacturer")
                    else None
                ),
            },
            type_data={
                "slug": ride.slug,
                "opening_date": (
                    ride.opening_date.isoformat()
                    if hasattr(ride, "opening_date") and ride.opening_date
                    else None
                ),
                "height_requirement": getattr(ride, "height_requirement", ""),
                "duration_minutes": getattr(ride, "duration_minutes", None),
                "max_speed_mph": getattr(ride, "max_speed_mph", None),
                "entrance_notes": location_obj.entrance_notes,
                "accessibility_notes": location_obj.accessibility_notes,
                "url": self._get_ride_url(ride),
            },
            cluster_weight=self._calculate_ride_weight(ride),
            cluster_category=self._get_ride_category(ride),
        )

    def get_queryset(
        self,
        bounds: Optional[GeoBounds] = None,
        filters: Optional[MapFilters] = None,
    ) -> QuerySet:
        """Get optimized queryset for ride locations."""
        queryset = RideLocation.objects.select_related(
            "ride", "ride__park", "ride__park__operator"
        ).filter(point__isnull=False)

        # Spatial filtering
        if bounds:
            queryset = queryset.filter(point__within=bounds.to_polygon())

        # Ride-specific filters
        if filters:
            if filters.ride_types:
                queryset = queryset.filter(ride__ride_type__in=filters.ride_types)
            if filters.search_query:
                queryset = queryset.filter(ride__name__icontains=filters.search_query)

        return queryset.order_by("ride__name")

    def _calculate_ride_weight(self, ride) -> int:
        """Calculate clustering weight based on ride importance."""
        weight = 1
        ride_type = getattr(ride, "ride_type", "").lower()
        if "coaster" in ride_type or "roller" in ride_type:
            weight += 1
        if (
            hasattr(ride, "average_rating")
            and ride.average_rating
            and ride.average_rating > 4.0
        ):
            weight += 1
        return min(weight, 3)  # Cap at 3 for rides

    def _get_ride_category(self, ride) -> str:
        """Determine ride category for clustering."""
        ride_type = getattr(ride, "ride_type", "").lower()
        if "coaster" in ride_type or "roller" in ride_type:
            return "coaster"
        elif "water" in ride_type or "splash" in ride_type:
            return "water_ride"
        else:
            return "other_ride"

    def _get_ride_url(self, ride) -> str:
        """Get URL for ride detail page."""
        try:
            return reverse("rides:detail", kwargs={"slug": ride.slug})
        except BaseException:
            return f"/rides/{ride.slug}/"
|
||||
|
||||
|
||||
class CompanyLocationAdapter(BaseLocationAdapter):
|
||||
"""Converts Company/CompanyHeadquarters to UnifiedLocation."""
|
||||
|
||||
def to_unified_location(
|
||||
self, location_obj: CompanyHeadquarters
|
||||
) -> Optional[UnifiedLocation]:
|
||||
"""Convert CompanyHeadquarters to UnifiedLocation."""
|
||||
# Note: CompanyHeadquarters doesn't have coordinates, so we need to geocode
|
||||
# For now, we'll skip companies without coordinates
|
||||
# TODO: Implement geocoding service integration
|
||||
return None
|
||||
|
||||
def get_queryset(
|
||||
self,
|
||||
bounds: Optional[GeoBounds] = None,
|
||||
filters: Optional[MapFilters] = None,
|
||||
) -> QuerySet:
|
||||
"""Get optimized queryset for company locations."""
|
||||
queryset = CompanyHeadquarters.objects.select_related("company")
|
||||
|
||||
# Company-specific filters
|
||||
if filters:
|
||||
if filters.company_roles:
|
||||
queryset = queryset.filter(
|
||||
company__roles__overlap=filters.company_roles
|
||||
)
|
||||
if filters.search_query:
|
||||
queryset = queryset.filter(
|
||||
company__name__icontains=filters.search_query
|
||||
)
|
||||
if filters.country:
|
||||
queryset = queryset.filter(country=filters.country)
|
||||
if filters.city:
|
||||
queryset = queryset.filter(city=filters.city)
|
||||
|
||||
return queryset.order_by("company__name")
|
||||
|
||||
|
||||
# GenericLocationAdapter removed - generic location app is being deprecated
|
||||
# All location functionality moved to domain-specific models (ParkLocation, RideLocation, etc.)
|
||||
|
||||
|
||||
class LocationAbstractionLayer:
|
||||
"""
|
||||
Abstraction layer handling different location model types.
|
||||
Implements the adapter pattern to provide unified access to all location types.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.adapters = {
|
||||
LocationType.PARK: ParkLocationAdapter(),
|
||||
LocationType.RIDE: RideLocationAdapter(),
|
||||
LocationType.COMPANY: CompanyLocationAdapter(),
|
||||
# LocationType.GENERIC: Removed - generic location app deprecated
|
||||
}
|
||||
|
||||
def get_all_locations(
|
||||
self,
|
||||
bounds: Optional[GeoBounds] = None,
|
||||
filters: Optional[MapFilters] = None,
|
||||
) -> List[UnifiedLocation]:
|
||||
"""Get locations from all sources within bounds."""
|
||||
all_locations = []
|
||||
|
||||
# Determine which location types to include
|
||||
location_types = (
|
||||
filters.location_types
|
||||
if filters and filters.location_types
|
||||
else set(LocationType)
|
||||
)
|
||||
|
||||
for location_type in location_types:
|
||||
adapter = self.adapters[location_type]
|
||||
queryset = adapter.get_queryset(bounds, filters)
|
||||
locations = adapter.bulk_convert(queryset)
|
||||
all_locations.extend(locations)
|
||||
|
||||
return all_locations
|
||||
|
||||
def get_locations_by_type(
|
||||
self,
|
||||
location_type: LocationType,
|
||||
bounds: Optional[GeoBounds] = None,
|
||||
filters: Optional[MapFilters] = None,
|
||||
) -> List[UnifiedLocation]:
|
||||
"""Get locations of specific type."""
|
||||
adapter = self.adapters[location_type]
|
||||
queryset = adapter.get_queryset(bounds, filters)
|
||||
return adapter.bulk_convert(queryset)
|
||||
|
||||
def get_location_by_id(
|
||||
self, location_type: LocationType, location_id: int
|
||||
) -> Optional[UnifiedLocation]:
|
||||
"""Get single location with full details."""
|
||||
adapter = self.adapters[location_type]
|
||||
|
||||
try:
|
||||
if location_type == LocationType.PARK:
|
||||
obj = ParkLocation.objects.select_related("park", "park__operator").get(
|
||||
park_id=location_id
|
||||
)
|
||||
elif location_type == LocationType.RIDE:
|
||||
obj = RideLocation.objects.select_related("ride", "ride__park").get(
|
||||
ride_id=location_id
|
||||
)
|
||||
elif location_type == LocationType.COMPANY:
|
||||
obj = CompanyHeadquarters.objects.select_related("company").get(
|
||||
company_id=location_id
|
||||
)
|
||||
# LocationType.GENERIC removed - generic location app deprecated
|
||||
else:
|
||||
return None
|
||||
|
||||
return adapter.to_unified_location(obj)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
# Import models after defining adapters to avoid circular imports
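# Usage sketch: one way the abstraction layer above might be queried for park
# locations inside a bounding box. The coordinate values are arbitrary examples,
# and the helper itself is only illustrative.
def _example_parks_in_bounds() -> List[UnifiedLocation]:
    layer = LocationAbstractionLayer()
    bounds = GeoBounds(north=42.0, south=41.0, east=-82.0, west=-83.0)
    filters = MapFilters(location_types={LocationType.PARK})
    return layer.get_locations_by_type(LocationType.PARK, bounds, filters)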
|
||||
@@ -1,462 +0,0 @@
|
||||
"""
|
||||
Location-aware search service for ThrillWiki.
|
||||
|
||||
Integrates PostGIS location data with existing search functionality
|
||||
to provide proximity-based search, location filtering, and geographic
|
||||
search capabilities.
|
||||
"""
|
||||
|
||||
from django.contrib.gis.geos import Point
|
||||
# Distance measure (for radius filters) vs. Distance function (for annotations)
from django.contrib.gis.db.models.functions import Distance as DistanceFunc
from django.contrib.gis.measure import Distance
|
||||
from django.db.models import Q
|
||||
from typing import Optional, List, Dict, Any, Set
|
||||
from dataclasses import dataclass
|
||||
|
||||
from apps.parks.models import Park, Company, ParkLocation
|
||||
from apps.rides.models import Ride
|
||||
|
||||
|
||||
@dataclass
|
||||
class LocationSearchFilters:
|
||||
"""Filters for location-aware search queries."""
|
||||
|
||||
# Text search
|
||||
search_query: Optional[str] = None
|
||||
|
||||
# Location-based filters
|
||||
location_point: Optional[Point] = None
|
||||
radius_km: Optional[float] = None
|
||||
location_types: Optional[Set[str]] = None # 'park', 'ride', 'company'
|
||||
|
||||
# Geographic filters
|
||||
country: Optional[str] = None
|
||||
state: Optional[str] = None
|
||||
city: Optional[str] = None
|
||||
|
||||
# Content-specific filters
|
||||
park_status: Optional[List[str]] = None
|
||||
ride_types: Optional[List[str]] = None
|
||||
company_roles: Optional[List[str]] = None
|
||||
|
||||
# Result options
|
||||
include_distance: bool = True
|
||||
max_results: int = 100
|
||||
|
||||
|
||||
@dataclass
|
||||
class LocationSearchResult:
|
||||
"""Single search result with location data."""
|
||||
|
||||
# Core data
|
||||
content_type: str # 'park', 'ride', 'company'
|
||||
object_id: int
|
||||
name: str
|
||||
description: Optional[str] = None
|
||||
url: Optional[str] = None
|
||||
|
||||
# Location data
|
||||
latitude: Optional[float] = None
|
||||
longitude: Optional[float] = None
|
||||
address: Optional[str] = None
|
||||
city: Optional[str] = None
|
||||
state: Optional[str] = None
|
||||
country: Optional[str] = None
|
||||
|
||||
# Distance data (if proximity search)
|
||||
distance_km: Optional[float] = None
|
||||
|
||||
# Additional metadata
|
||||
status: Optional[str] = None
|
||||
tags: Optional[List[str]] = None
|
||||
rating: Optional[float] = None
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary for JSON serialization."""
|
||||
return {
|
||||
"content_type": self.content_type,
|
||||
"object_id": self.object_id,
|
||||
"name": self.name,
|
||||
"description": self.description,
|
||||
"url": self.url,
|
||||
"location": {
|
||||
"latitude": self.latitude,
|
||||
"longitude": self.longitude,
|
||||
"address": self.address,
|
||||
"city": self.city,
|
||||
"state": self.state,
|
||||
"country": self.country,
|
||||
},
|
||||
"distance_km": self.distance_km,
|
||||
"status": self.status,
|
||||
"tags": self.tags or [],
|
||||
"rating": self.rating,
|
||||
}
|
||||
|
||||
|
||||
class LocationSearchService:
|
||||
"""Service for performing location-aware searches across ThrillWiki content."""
|
||||
|
||||
def search(self, filters: LocationSearchFilters) -> List[LocationSearchResult]:
|
||||
"""
|
||||
Perform a comprehensive location-aware search.
|
||||
|
||||
Args:
|
||||
filters: Search filters and options
|
||||
|
||||
Returns:
|
||||
List of search results with location data
|
||||
"""
|
||||
results = []
|
||||
|
||||
# Search each content type based on filters
|
||||
if not filters.location_types or "park" in filters.location_types:
|
||||
results.extend(self._search_parks(filters))
|
||||
|
||||
if not filters.location_types or "ride" in filters.location_types:
|
||||
results.extend(self._search_rides(filters))
|
||||
|
||||
if not filters.location_types or "company" in filters.location_types:
|
||||
results.extend(self._search_companies(filters))
|
||||
|
||||
# Sort by distance if proximity search, otherwise by relevance
|
||||
if filters.location_point and filters.include_distance:
|
||||
results.sort(key=lambda x: x.distance_km or float("inf"))
|
||||
else:
|
||||
results.sort(key=lambda x: x.name.lower())
|
||||
|
||||
# Apply max results limit
|
||||
return results[: filters.max_results]
|
||||
|
||||
def _search_parks(
|
||||
self, filters: LocationSearchFilters
|
||||
) -> List[LocationSearchResult]:
|
||||
"""Search parks with location data."""
|
||||
queryset = Park.objects.select_related("location", "operator").all()
|
||||
|
||||
# Apply location filters
|
||||
queryset = self._apply_location_filters(queryset, filters, "location__point")
|
||||
|
||||
# Apply text search
|
||||
if filters.search_query:
|
||||
query = (
|
||||
Q(name__icontains=filters.search_query)
|
||||
| Q(description__icontains=filters.search_query)
|
||||
| Q(location__city__icontains=filters.search_query)
|
||||
| Q(location__state__icontains=filters.search_query)
|
||||
| Q(location__country__icontains=filters.search_query)
|
||||
)
|
||||
queryset = queryset.filter(query)
|
||||
|
||||
# Apply park-specific filters
|
||||
if filters.park_status:
|
||||
queryset = queryset.filter(status__in=filters.park_status)
|
||||
|
||||
# Add distance annotation if proximity search
|
||||
if filters.location_point and filters.include_distance:
|
||||
queryset = queryset.annotate(
|
||||
distance=DistanceFunc("location__point", filters.location_point)
|
||||
).order_by("distance")
|
||||
|
||||
# Convert to search results
|
||||
results = []
|
||||
for park in queryset:
|
||||
result = LocationSearchResult(
|
||||
content_type="park",
|
||||
object_id=park.id,
|
||||
name=park.name,
|
||||
description=park.description,
|
||||
url=(
|
||||
park.get_absolute_url()
|
||||
if hasattr(park, "get_absolute_url")
|
||||
else None
|
||||
),
|
||||
status=park.get_status_display(),
|
||||
rating=(float(park.average_rating) if park.average_rating else None),
|
||||
tags=["park", park.status.lower()],
|
||||
)
|
||||
|
||||
# Add location data
|
||||
if hasattr(park, "location") and park.location:
|
||||
location = park.location
|
||||
result.latitude = location.latitude
|
||||
result.longitude = location.longitude
|
||||
result.address = location.formatted_address
|
||||
result.city = location.city
|
||||
result.state = location.state
|
||||
result.country = location.country
|
||||
|
||||
# Add distance if proximity search
|
||||
if (
|
||||
filters.location_point
|
||||
and filters.include_distance
|
||||
and hasattr(park, "distance")
|
||||
):
|
||||
result.distance_km = float(park.distance.km)
|
||||
|
||||
results.append(result)
|
||||
|
||||
return results
|
||||
|
||||
def _search_rides(
|
||||
self, filters: LocationSearchFilters
|
||||
) -> List[LocationSearchResult]:
|
||||
"""Search rides with location data."""
|
||||
queryset = Ride.objects.select_related("park", "location").all()
|
||||
|
||||
# Apply location filters
|
||||
queryset = self._apply_location_filters(queryset, filters, "location__point")
|
||||
|
||||
# Apply text search
|
||||
if filters.search_query:
|
||||
query = (
|
||||
Q(name__icontains=filters.search_query)
|
||||
| Q(description__icontains=filters.search_query)
|
||||
| Q(park__name__icontains=filters.search_query)
|
||||
| Q(location__park_area__icontains=filters.search_query)
|
||||
)
|
||||
queryset = queryset.filter(query)
|
||||
|
||||
# Apply ride-specific filters
|
||||
if filters.ride_types:
|
||||
queryset = queryset.filter(ride_type__in=filters.ride_types)
|
||||
|
||||
# Add distance annotation if proximity search
|
||||
if filters.location_point and filters.include_distance:
|
||||
queryset = queryset.annotate(
|
||||
distance=DistanceFunc("location__point", filters.location_point)
|
||||
).order_by("distance")
|
||||
|
||||
# Convert to search results
|
||||
results = []
|
||||
for ride in queryset:
|
||||
result = LocationSearchResult(
|
||||
content_type="ride",
|
||||
object_id=ride.id,
|
||||
name=ride.name,
|
||||
description=ride.description,
|
||||
url=(
|
||||
ride.get_absolute_url()
|
||||
if hasattr(ride, "get_absolute_url")
|
||||
else None
|
||||
),
|
||||
status=ride.status,
|
||||
tags=[
|
||||
"ride",
|
||||
ride.ride_type.lower() if ride.ride_type else "attraction",
|
||||
],
|
||||
)
|
||||
|
||||
# Add location data from ride location or park location
|
||||
location = None
|
||||
if hasattr(ride, "location") and ride.location:
|
||||
location = ride.location
|
||||
result.latitude = location.latitude
|
||||
result.longitude = location.longitude
|
||||
result.address = (
|
||||
f"{ride.park.name} - {location.park_area}"
|
||||
if location.park_area
|
||||
else ride.park.name
|
||||
)
|
||||
|
||||
# Add distance if proximity search
|
||||
if (
|
||||
filters.location_point
|
||||
and filters.include_distance
|
||||
and hasattr(ride, "distance")
|
||||
):
|
||||
result.distance_km = float(ride.distance.km)
|
||||
|
||||
# Fall back to park location if no specific ride location
|
||||
elif ride.park and hasattr(ride.park, "location") and ride.park.location:
|
||||
park_location = ride.park.location
|
||||
result.latitude = park_location.latitude
|
||||
result.longitude = park_location.longitude
|
||||
result.address = park_location.formatted_address
|
||||
result.city = park_location.city
|
||||
result.state = park_location.state
|
||||
result.country = park_location.country
|
||||
|
||||
results.append(result)
|
||||
|
||||
return results
|
||||
|
||||
def _search_companies(
|
||||
self, filters: LocationSearchFilters
|
||||
) -> List[LocationSearchResult]:
|
||||
"""Search companies with headquarters location data."""
|
||||
queryset = Company.objects.select_related("headquarters").all()
|
||||
|
||||
# Apply location filters
|
||||
queryset = self._apply_location_filters(
|
||||
queryset, filters, "headquarters__point"
|
||||
)
|
||||
|
||||
# Apply text search
|
||||
if filters.search_query:
|
||||
query = (
|
||||
Q(name__icontains=filters.search_query)
|
||||
| Q(description__icontains=filters.search_query)
|
||||
| Q(headquarters__city__icontains=filters.search_query)
|
||||
| Q(headquarters__state_province__icontains=filters.search_query)
|
||||
| Q(headquarters__country__icontains=filters.search_query)
|
||||
)
|
||||
queryset = queryset.filter(query)
|
||||
|
||||
# Apply company-specific filters
|
||||
if filters.company_roles:
|
||||
queryset = queryset.filter(roles__overlap=filters.company_roles)
|
||||
|
||||
# Add distance annotation if proximity search
|
||||
if filters.location_point and filters.include_distance:
|
||||
queryset = queryset.annotate(
|
||||
distance=DistanceFunc("headquarters__point", filters.location_point)
|
||||
).order_by("distance")
|
||||
|
||||
# Convert to search results
|
||||
results = []
|
||||
for company in queryset:
|
||||
result = LocationSearchResult(
|
||||
content_type="company",
|
||||
object_id=company.id,
|
||||
name=company.name,
|
||||
description=company.description,
|
||||
url=(
|
||||
company.get_absolute_url()
|
||||
if hasattr(company, "get_absolute_url")
|
||||
else None
|
||||
),
|
||||
tags=["company"] + (company.roles or []),
|
||||
)
|
||||
|
||||
# Add location data
|
||||
if hasattr(company, "headquarters") and company.headquarters:
|
||||
hq = company.headquarters
|
||||
result.latitude = hq.latitude
|
||||
result.longitude = hq.longitude
|
||||
result.address = hq.formatted_address
|
||||
result.city = hq.city
|
||||
result.state = hq.state_province
|
||||
result.country = hq.country
|
||||
|
||||
# Add distance if proximity search
|
||||
if (
|
||||
filters.location_point
|
||||
and filters.include_distance
|
||||
and hasattr(company, "distance")
|
||||
):
|
||||
result.distance_km = float(company.distance.km)
|
||||
|
||||
results.append(result)
|
||||
|
||||
return results
|
||||
|
||||
def _apply_location_filters(
|
||||
self, queryset, filters: LocationSearchFilters, point_field: str
|
||||
):
|
||||
"""Apply common location filters to a queryset."""
|
||||
|
||||
# Proximity filter
|
||||
if filters.location_point and filters.radius_km:
|
||||
distance = Distance(km=filters.radius_km)
|
||||
queryset = queryset.filter(
|
||||
**{
|
||||
f"{point_field}__distance_lte": (
|
||||
filters.location_point,
|
||||
distance,
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
# Geographic filters - adjust field names based on model
|
||||
if filters.country:
|
||||
if "headquarters" in point_field:
|
||||
queryset = queryset.filter(
|
||||
headquarters__country__icontains=filters.country
|
||||
)
|
||||
else:
|
||||
location_field = point_field.split("__")[0]
|
||||
queryset = queryset.filter(
|
||||
**{f"{location_field}__country__icontains": filters.country}
|
||||
)
|
||||
|
||||
if filters.state:
|
||||
if "headquarters" in point_field:
|
||||
queryset = queryset.filter(
|
||||
headquarters__state_province__icontains=filters.state
|
||||
)
|
||||
else:
|
||||
location_field = point_field.split("__")[0]
|
||||
queryset = queryset.filter(
|
||||
**{f"{location_field}__state__icontains": filters.state}
|
||||
)
|
||||
|
||||
if filters.city:
|
||||
location_field = point_field.split("__")[0]
|
||||
queryset = queryset.filter(
|
||||
**{f"{location_field}__city__icontains": filters.city}
|
||||
)
|
||||
|
||||
return queryset
|
||||
|
||||
def suggest_locations(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get location suggestions for autocomplete.
|
||||
|
||||
Args:
|
||||
query: Search query string
|
||||
limit: Maximum number of suggestions
|
||||
|
||||
Returns:
|
||||
List of location suggestions
|
||||
"""
|
||||
suggestions = []
|
||||
|
||||
if len(query) < 2:
|
||||
return suggestions
|
||||
|
||||
# Get park location suggestions
|
||||
park_locations = ParkLocation.objects.filter(
|
||||
Q(park__name__icontains=query)
|
||||
| Q(city__icontains=query)
|
||||
| Q(state__icontains=query)
|
||||
).select_related("park")[: limit // 3]
|
||||
|
||||
for location in park_locations:
|
||||
suggestions.append(
|
||||
{
|
||||
"type": "park",
|
||||
"name": location.park.name,
|
||||
"address": location.formatted_address,
|
||||
"coordinates": location.coordinates,
|
||||
"url": (
|
||||
location.park.get_absolute_url()
|
||||
if hasattr(location.park, "get_absolute_url")
|
||||
else None
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
# Get city suggestions
|
||||
cities = (
|
||||
ParkLocation.objects.filter(city__icontains=query)
|
||||
.values("city", "state", "country")
|
||||
.distinct()[: limit // 3]
|
||||
)
|
||||
|
||||
for city_data in cities:
|
||||
suggestions.append(
|
||||
{
|
||||
"type": "city",
|
||||
"name": f"{city_data['city']}, {city_data['state']}",
|
||||
"address": f"{city_data['city']}, {city_data['state']}, {
|
||||
city_data['country']
|
||||
}",
|
||||
"coordinates": None,
|
||||
}
|
||||
)
|
||||
|
||||
return suggestions[:limit]
|
||||
|
||||
|
||||
# Global instance
|
||||
location_search_service = LocationSearchService()
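# Usage sketch: a proximity search for parks and rides within 50 km of a point
# through the module-level service above. The coordinates are arbitrary example
# values (roughly Sandusky, OH); results come back sorted by distance because a
# location_point is supplied.
def _example_nearby_search() -> List[Dict[str, Any]]:
    filters = LocationSearchFilters(
        location_point=Point(-82.68, 41.48, srid=4326),
        radius_km=50.0,
        location_types={"park", "ride"},
        max_results=25,
    )
    return [result.to_dict() for result in location_search_service.search(filters)]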
|
||||
@@ -1,434 +0,0 @@
|
||||
"""
|
||||
Caching service for map data to improve performance and reduce database load.
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import time
|
||||
from typing import Dict, List, Optional, Any
|
||||
|
||||
from django.core.cache import cache
|
||||
from django.utils import timezone
|
||||
|
||||
from .data_structures import (
|
||||
UnifiedLocation,
|
||||
ClusterData,
|
||||
GeoBounds,
|
||||
MapFilters,
|
||||
MapResponse,
|
||||
QueryPerformanceMetrics,
|
||||
)
|
||||
|
||||
|
||||
class MapCacheService:
|
||||
"""
|
||||
Handles caching of map data with geographic partitioning and intelligent invalidation.
|
||||
"""
|
||||
|
||||
# Cache configuration
|
||||
DEFAULT_TTL = 3600 # 1 hour
|
||||
CLUSTER_TTL = 7200 # 2 hours (clusters change less frequently)
|
||||
LOCATION_DETAIL_TTL = 1800 # 30 minutes
|
||||
BOUNDS_CACHE_TTL = 1800 # 30 minutes
|
||||
|
||||
# Cache key prefixes
|
||||
CACHE_PREFIX = "thrillwiki_map"
|
||||
LOCATIONS_PREFIX = f"{CACHE_PREFIX}:locations"
|
||||
CLUSTERS_PREFIX = f"{CACHE_PREFIX}:clusters"
|
||||
BOUNDS_PREFIX = f"{CACHE_PREFIX}:bounds"
|
||||
DETAIL_PREFIX = f"{CACHE_PREFIX}:detail"
|
||||
STATS_PREFIX = f"{CACHE_PREFIX}:stats"
|
||||
|
||||
# Geographic partitioning settings
|
||||
GEOHASH_PRECISION = 6 # ~1.2km precision for cache partitioning
|
||||
|
||||
def __init__(self):
|
||||
self.cache_stats = {
|
||||
"hits": 0,
|
||||
"misses": 0,
|
||||
"invalidations": 0,
|
||||
"geohash_partitions": 0,
|
||||
}
|
||||
|
||||
def get_locations_cache_key(
|
||||
self,
|
||||
bounds: Optional[GeoBounds],
|
||||
filters: Optional[MapFilters],
|
||||
zoom_level: Optional[int] = None,
|
||||
) -> str:
|
||||
"""Generate cache key for location queries."""
|
||||
key_parts = [self.LOCATIONS_PREFIX]
|
||||
|
||||
if bounds:
|
||||
# Use geohash for spatial locality
|
||||
geohash = self._bounds_to_geohash(bounds)
|
||||
key_parts.append(f"geo:{geohash}")
|
||||
|
||||
if filters:
|
||||
# Create deterministic hash of filters
|
||||
filter_hash = self._hash_filters(filters)
|
||||
key_parts.append(f"filters:{filter_hash}")
|
||||
|
||||
if zoom_level is not None:
|
||||
key_parts.append(f"zoom:{zoom_level}")
|
||||
|
||||
return ":".join(key_parts)
|
||||
|
||||
def get_clusters_cache_key(
|
||||
self,
|
||||
bounds: Optional[GeoBounds],
|
||||
filters: Optional[MapFilters],
|
||||
zoom_level: int,
|
||||
) -> str:
|
||||
"""Generate cache key for cluster queries."""
|
||||
key_parts = [self.CLUSTERS_PREFIX, f"zoom:{zoom_level}"]
|
||||
|
||||
if bounds:
|
||||
geohash = self._bounds_to_geohash(bounds)
|
||||
key_parts.append(f"geo:{geohash}")
|
||||
|
||||
if filters:
|
||||
filter_hash = self._hash_filters(filters)
|
||||
key_parts.append(f"filters:{filter_hash}")
|
||||
|
||||
return ":".join(key_parts)
|
||||
|
||||
def get_location_detail_cache_key(
|
||||
self, location_type: str, location_id: int
|
||||
) -> str:
|
||||
"""Generate cache key for individual location details."""
|
||||
return f"{self.DETAIL_PREFIX}:{location_type}:{location_id}"
|
||||
|
||||
def cache_locations(
|
||||
self,
|
||||
cache_key: str,
|
||||
locations: List[UnifiedLocation],
|
||||
ttl: Optional[int] = None,
|
||||
) -> None:
|
||||
"""Cache location data."""
|
||||
try:
|
||||
# Convert locations to serializable format
|
||||
cache_data = {
|
||||
"locations": [loc.to_dict() for loc in locations],
|
||||
"cached_at": timezone.now().isoformat(),
|
||||
"count": len(locations),
|
||||
}
|
||||
|
||||
cache.set(cache_key, cache_data, ttl or self.DEFAULT_TTL)
|
||||
except Exception as e:
|
||||
# Log error but don't fail the request
|
||||
print(f"Cache write error for key {cache_key}: {e}")
|
||||
|
||||
def cache_clusters(
|
||||
self,
|
||||
cache_key: str,
|
||||
clusters: List[ClusterData],
|
||||
ttl: Optional[int] = None,
|
||||
) -> None:
|
||||
"""Cache cluster data."""
|
||||
try:
|
||||
cache_data = {
|
||||
"clusters": [cluster.to_dict() for cluster in clusters],
|
||||
"cached_at": timezone.now().isoformat(),
|
||||
"count": len(clusters),
|
||||
}
|
||||
|
||||
cache.set(cache_key, cache_data, ttl or self.CLUSTER_TTL)
|
||||
except Exception as e:
|
||||
print(f"Cache write error for clusters {cache_key}: {e}")
|
||||
|
||||
def cache_map_response(
|
||||
self, cache_key: str, response: MapResponse, ttl: Optional[int] = None
|
||||
) -> None:
|
||||
"""Cache complete map response."""
|
||||
try:
|
||||
cache_data = response.to_dict()
|
||||
cache_data["cached_at"] = timezone.now().isoformat()
|
||||
|
||||
cache.set(cache_key, cache_data, ttl or self.DEFAULT_TTL)
|
||||
except Exception as e:
|
||||
print(f"Cache write error for response {cache_key}: {e}")
|
||||
|
||||
def get_cached_locations(self, cache_key: str) -> Optional[List[UnifiedLocation]]:
|
||||
"""Retrieve cached location data."""
|
||||
try:
|
||||
cache_data = cache.get(cache_key)
|
||||
if not cache_data:
|
||||
self.cache_stats["misses"] += 1
|
||||
return None
|
||||
|
||||
self.cache_stats["hits"] += 1
|
||||
|
||||
# Convert back to UnifiedLocation objects
|
||||
locations = []
|
||||
for loc_data in cache_data["locations"]:
|
||||
# Reconstruct UnifiedLocation from dictionary
|
||||
locations.append(self._dict_to_unified_location(loc_data))
|
||||
|
||||
return locations
|
||||
|
||||
except Exception as e:
|
||||
print(f"Cache read error for key {cache_key}: {e}")
|
||||
self.cache_stats["misses"] += 1
|
||||
return None
|
||||
|
||||
def get_cached_clusters(self, cache_key: str) -> Optional[List[ClusterData]]:
|
||||
"""Retrieve cached cluster data."""
|
||||
try:
|
||||
cache_data = cache.get(cache_key)
|
||||
if not cache_data:
|
||||
self.cache_stats["misses"] += 1
|
||||
return None
|
||||
|
||||
self.cache_stats["hits"] += 1
|
||||
|
||||
# Convert back to ClusterData objects
|
||||
clusters = []
|
||||
for cluster_data in cache_data["clusters"]:
|
||||
clusters.append(self._dict_to_cluster_data(cluster_data))
|
||||
|
||||
return clusters
|
||||
|
||||
except Exception as e:
|
||||
print(f"Cache read error for clusters {cache_key}: {e}")
|
||||
self.cache_stats["misses"] += 1
|
||||
return None
|
||||
|
||||
def get_cached_map_response(self, cache_key: str) -> Optional[MapResponse]:
|
||||
"""Retrieve cached map response."""
|
||||
try:
|
||||
cache_data = cache.get(cache_key)
|
||||
if not cache_data:
|
||||
self.cache_stats["misses"] += 1
|
||||
return None
|
||||
|
||||
self.cache_stats["hits"] += 1
|
||||
|
||||
# Convert back to MapResponse object
|
||||
return self._dict_to_map_response(cache_data["data"])
|
||||
|
||||
except Exception as e:
|
||||
print(f"Cache read error for response {cache_key}: {e}")
|
||||
self.cache_stats["misses"] += 1
|
||||
return None
|
||||
|
||||
def invalidate_location_cache(
|
||||
self, location_type: str, location_id: Optional[int] = None
|
||||
) -> None:
|
||||
"""Invalidate cache for specific location or all locations of a type."""
|
||||
try:
|
||||
if location_id:
|
||||
# Invalidate specific location detail
|
||||
detail_key = self.get_location_detail_cache_key(
|
||||
location_type, location_id
|
||||
)
|
||||
cache.delete(detail_key)
|
||||
|
||||
# Invalidate related location and cluster caches
|
||||
# In a production system, you'd want more sophisticated cache
|
||||
# tagging
|
||||
cache.delete_many(
|
||||
[f"{self.LOCATIONS_PREFIX}:*", f"{self.CLUSTERS_PREFIX}:*"]
|
||||
)
|
||||
|
||||
self.cache_stats["invalidations"] += 1
|
||||
|
||||
except Exception as e:
|
||||
print(f"Cache invalidation error: {e}")
|
||||
|
||||
def invalidate_bounds_cache(self, bounds: GeoBounds) -> None:
|
||||
"""Invalidate cache for specific geographic bounds."""
|
||||
try:
|
||||
geohash = self._bounds_to_geohash(bounds)
|
||||
pattern = f"{self.LOCATIONS_PREFIX}:geo:{geohash}*"
|
||||
|
||||
# In production, you'd use cache tagging or Redis SCAN
|
||||
# For now, we'll invalidate broader patterns
|
||||
cache.delete_many([pattern])
|
||||
|
||||
self.cache_stats["invalidations"] += 1
|
||||
|
||||
except Exception as e:
|
||||
print(f"Bounds cache invalidation error: {e}")
|
||||
|
||||
def clear_all_map_cache(self) -> None:
|
||||
"""Clear all map-related cache data."""
|
||||
try:
|
||||
cache.delete_many(
|
||||
[
|
||||
f"{self.LOCATIONS_PREFIX}:*",
|
||||
f"{self.CLUSTERS_PREFIX}:*",
|
||||
f"{self.BOUNDS_PREFIX}:*",
|
||||
f"{self.DETAIL_PREFIX}:*",
|
||||
]
|
||||
)
|
||||
|
||||
self.cache_stats["invalidations"] += 1
|
||||
|
||||
except Exception as e:
|
||||
print(f"Cache clear error: {e}")
|
||||
|
||||
def get_cache_stats(self) -> Dict[str, Any]:
|
||||
"""Get cache performance statistics."""
|
||||
total_requests = self.cache_stats["hits"] + self.cache_stats["misses"]
|
||||
hit_rate = (
|
||||
(self.cache_stats["hits"] / total_requests * 100)
|
||||
if total_requests > 0
|
||||
else 0
|
||||
)
|
||||
|
||||
return {
|
||||
"hits": self.cache_stats["hits"],
|
||||
"misses": self.cache_stats["misses"],
|
||||
"hit_rate_percent": round(hit_rate, 2),
|
||||
"invalidations": self.cache_stats["invalidations"],
|
||||
"geohash_partitions": self.cache_stats["geohash_partitions"],
|
||||
}
|
||||
|
||||
def record_performance_metrics(self, metrics: QueryPerformanceMetrics) -> None:
|
||||
"""Record query performance metrics for analysis."""
|
||||
try:
|
||||
# 5-minute buckets
|
||||
stats_key = f"{self.STATS_PREFIX}:performance:{int(time.time() // 300)}"
|
||||
|
||||
current_stats = cache.get(
|
||||
stats_key,
|
||||
{
|
||||
"query_count": 0,
|
||||
"total_time_ms": 0,
|
||||
"cache_hits": 0,
|
||||
"db_queries": 0,
|
||||
},
|
||||
)
|
||||
|
||||
current_stats["query_count"] += 1
|
||||
current_stats["total_time_ms"] += metrics.query_time_ms
|
||||
current_stats["cache_hits"] += 1 if metrics.cache_hit else 0
|
||||
current_stats["db_queries"] += metrics.db_query_count
|
||||
|
||||
cache.set(stats_key, current_stats, 3600) # Keep for 1 hour
|
||||
|
||||
except Exception as e:
|
||||
print(f"Performance metrics recording error: {e}")
|
||||
|
||||
def _bounds_to_geohash(self, bounds: GeoBounds) -> str:
|
||||
"""Convert geographic bounds to geohash for cache partitioning."""
|
||||
# Use center point of bounds for geohash
|
||||
center_lat = (bounds.north + bounds.south) / 2
|
||||
center_lng = (bounds.east + bounds.west) / 2
|
||||
|
||||
# Simple geohash implementation (in production, use a library)
|
||||
return self._encode_geohash(center_lat, center_lng, self.GEOHASH_PRECISION)
|
||||
|
||||
def _encode_geohash(self, lat: float, lng: float, precision: int) -> str:
|
||||
"""Simple geohash encoding implementation."""
|
||||
# This is a simplified implementation
|
||||
# In production, use the `geohash` library
|
||||
lat_range = [-90.0, 90.0]
|
||||
lng_range = [-180.0, 180.0]
|
||||
|
||||
geohash = ""
|
||||
bits = 0
|
||||
bit_count = 0
|
||||
even_bit = True
|
||||
|
||||
while len(geohash) < precision:
|
||||
if even_bit:
|
||||
# longitude
|
||||
mid = (lng_range[0] + lng_range[1]) / 2
|
||||
if lng >= mid:
|
||||
bits = (bits << 1) + 1
|
||||
lng_range[0] = mid
|
||||
else:
|
||||
bits = bits << 1
|
||||
lng_range[1] = mid
|
||||
else:
|
||||
# latitude
|
||||
mid = (lat_range[0] + lat_range[1]) / 2
|
||||
if lat >= mid:
|
||||
bits = (bits << 1) + 1
|
||||
lat_range[0] = mid
|
||||
else:
|
||||
bits = bits << 1
|
||||
lat_range[1] = mid
|
||||
|
||||
even_bit = not even_bit
|
||||
bit_count += 1
|
||||
|
||||
if bit_count == 5:
|
||||
# Convert 5 bits to base32 character
|
||||
geohash += "0123456789bcdefghjkmnpqrstuvwxyz"[bits]
|
||||
bits = 0
|
||||
bit_count = 0
|
||||
|
||||
return geohash
|
||||
|
||||
def _hash_filters(self, filters: MapFilters) -> str:
|
||||
"""Create deterministic hash of filters for cache keys."""
|
||||
filter_dict = filters.to_dict()
|
||||
# Sort to ensure consistent ordering
|
||||
filter_str = json.dumps(filter_dict, sort_keys=True)
|
||||
return hashlib.md5(filter_str.encode()).hexdigest()[:8]
|
||||
|
||||
def _dict_to_unified_location(self, data: Dict[str, Any]) -> UnifiedLocation:
|
||||
"""Convert dictionary back to UnifiedLocation object."""
|
||||
from .data_structures import LocationType
|
||||
|
||||
return UnifiedLocation(
|
||||
id=data["id"],
|
||||
type=LocationType(data["type"]),
|
||||
name=data["name"],
|
||||
coordinates=list(data["coordinates"]),
|
||||
address=data.get("address"),
|
||||
metadata=data.get("metadata", {}),
|
||||
type_data=data.get("type_data", {}),
|
||||
cluster_weight=data.get("cluster_weight", 1),
|
||||
cluster_category=data.get("cluster_category", "default"),
|
||||
)
|
||||
|
||||
def _dict_to_cluster_data(self, data: Dict[str, Any]) -> ClusterData:
|
||||
"""Convert dictionary back to ClusterData object."""
|
||||
from .data_structures import LocationType
|
||||
|
||||
bounds = GeoBounds(**data["bounds"])
|
||||
types = {LocationType(t) for t in data["types"]}
|
||||
|
||||
representative = None
|
||||
if data.get("representative"):
|
||||
representative = self._dict_to_unified_location(data["representative"])
|
||||
|
||||
return ClusterData(
|
||||
id=data["id"],
|
||||
coordinates=list(data["coordinates"]),
|
||||
count=data["count"],
|
||||
types=types,
|
||||
bounds=bounds,
|
||||
representative_location=representative,
|
||||
)
|
||||
|
||||
def _dict_to_map_response(self, data: Dict[str, Any]) -> MapResponse:
|
||||
"""Convert dictionary back to MapResponse object."""
|
||||
locations = [
|
||||
self._dict_to_unified_location(loc) for loc in data.get("locations", [])
|
||||
]
|
||||
clusters = [
|
||||
self._dict_to_cluster_data(cluster) for cluster in data.get("clusters", [])
|
||||
]
|
||||
|
||||
bounds = None
|
||||
if data.get("bounds"):
|
||||
bounds = GeoBounds(**data["bounds"])
|
||||
|
||||
return MapResponse(
|
||||
locations=locations,
|
||||
clusters=clusters,
|
||||
bounds=bounds,
|
||||
total_count=data.get("total_count", 0),
|
||||
filtered_count=data.get("filtered_count", 0),
|
||||
zoom_level=data.get("zoom_level"),
|
||||
clustered=data.get("clustered", False),
|
||||
)
|
||||
|
||||
|
||||
# Global cache service instance
|
||||
map_cache = MapCacheService()
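# Usage sketch: build a viewport cache key, then write and read back a list of
# locations through the module-level `map_cache` instance above. The bounds and
# filter values are arbitrary examples.
def _example_cache_round_trip(
    locations: List[UnifiedLocation],
) -> Optional[List[UnifiedLocation]]:
    bounds = GeoBounds(north=42.0, south=41.0, east=-82.0, west=-83.0)
    filters = MapFilters(search_query="coaster")
    key = map_cache.get_locations_cache_key(bounds, filters, zoom_level=10)
    map_cache.cache_locations(key, locations)
    return map_cache.get_cached_locations(key)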
|
||||
@@ -1,474 +0,0 @@
|
||||
"""
|
||||
Unified Map Service - Main orchestrating service for all map functionality.
|
||||
"""
|
||||
|
||||
import time
|
||||
from typing import List, Optional, Dict, Any, Set
|
||||
from django.db import connection
|
||||
|
||||
from .data_structures import (
|
||||
UnifiedLocation,
|
||||
ClusterData,
|
||||
GeoBounds,
|
||||
MapFilters,
|
||||
MapResponse,
|
||||
LocationType,
|
||||
QueryPerformanceMetrics,
|
||||
)
|
||||
from .location_adapters import LocationAbstractionLayer
|
||||
from .clustering_service import ClusteringService
|
||||
from .map_cache_service import MapCacheService
|
||||
|
||||
|
||||
class UnifiedMapService:
|
||||
"""
|
||||
Main service orchestrating map data retrieval, filtering, clustering, and caching.
|
||||
Provides a unified interface for all location types with performance optimization.
|
||||
"""
|
||||
|
||||
# Performance thresholds
|
||||
MAX_UNCLUSTERED_POINTS = 500
|
||||
MAX_CLUSTERED_POINTS = 2000
|
||||
DEFAULT_ZOOM_LEVEL = 10
|
||||
|
||||
def __init__(self):
|
||||
self.location_layer = LocationAbstractionLayer()
|
||||
self.clustering_service = ClusteringService()
|
||||
self.cache_service = MapCacheService()
|
||||
|
||||
def get_map_data(
|
||||
self,
|
||||
*,
|
||||
bounds: Optional[GeoBounds] = None,
|
||||
filters: Optional[MapFilters] = None,
|
||||
zoom_level: int = DEFAULT_ZOOM_LEVEL,
|
||||
cluster: bool = True,
|
||||
use_cache: bool = True,
|
||||
) -> MapResponse:
|
||||
"""
|
||||
Primary method for retrieving unified map data.
|
||||
|
||||
Args:
|
||||
bounds: Geographic bounds to query within
|
||||
filters: Filtering criteria for locations
|
||||
zoom_level: Map zoom level for clustering decisions
|
||||
cluster: Whether to apply clustering
|
||||
use_cache: Whether to use cached data
|
||||
|
||||
Returns:
|
||||
MapResponse with locations, clusters, and metadata
|
||||
"""
|
||||
start_time = time.time()
|
||||
initial_query_count = len(connection.queries)
|
||||
cache_hit = False
|
||||
|
||||
try:
|
||||
# Generate cache key
|
||||
cache_key = None
|
||||
if use_cache:
|
||||
cache_key = self._generate_cache_key(
|
||||
bounds, filters, zoom_level, cluster
|
||||
)
|
||||
|
||||
# Try to get from cache first
|
||||
cached_response = self.cache_service.get_cached_map_response(cache_key)
|
||||
if cached_response:
|
||||
cached_response.cache_hit = True
|
||||
cached_response.query_time_ms = int(
|
||||
(time.time() - start_time) * 1000
|
||||
)
|
||||
return cached_response
|
||||
|
||||
# Get locations from database
|
||||
locations = self._get_locations_from_db(bounds, filters)
|
||||
|
||||
# Apply smart limiting based on zoom level and density
|
||||
locations = self._apply_smart_limiting(locations, bounds, zoom_level)
|
||||
|
||||
# Determine if clustering should be applied
|
||||
should_cluster = cluster and self.clustering_service.should_cluster(
|
||||
zoom_level, len(locations)
|
||||
)
|
||||
|
||||
# Apply clustering if needed
|
||||
clusters = []
|
||||
if should_cluster:
|
||||
locations, clusters = self.clustering_service.cluster_locations(
|
||||
locations, zoom_level, bounds
|
||||
)
|
||||
|
||||
# Calculate response bounds
|
||||
response_bounds = self._calculate_response_bounds(
|
||||
locations, clusters, bounds
|
||||
)
|
||||
|
||||
# Create response
|
||||
response = MapResponse(
|
||||
locations=locations,
|
||||
clusters=clusters,
|
||||
bounds=response_bounds,
|
||||
total_count=len(locations) + sum(cluster.count for cluster in clusters),
|
||||
filtered_count=len(locations),
|
||||
zoom_level=zoom_level,
|
||||
clustered=should_cluster,
|
||||
cache_hit=cache_hit,
|
||||
query_time_ms=int((time.time() - start_time) * 1000),
|
||||
filters_applied=self._get_applied_filters_list(filters),
|
||||
)
|
||||
|
||||
# Cache the response
|
||||
if use_cache and cache_key:
|
||||
self.cache_service.cache_map_response(cache_key, response)
|
||||
|
||||
# Record performance metrics
|
||||
self._record_performance_metrics(
|
||||
start_time,
|
||||
initial_query_count,
|
||||
cache_hit,
|
||||
len(locations) + len(clusters),
|
||||
bounds is not None,
|
||||
should_cluster,
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
except Exception:
|
||||
# Return error response
|
||||
return MapResponse(
|
||||
locations=[],
|
||||
clusters=[],
|
||||
total_count=0,
|
||||
filtered_count=0,
|
||||
query_time_ms=int((time.time() - start_time) * 1000),
|
||||
cache_hit=False,
|
||||
)
|
||||
|
||||
def get_location_details(
|
||||
self, location_type: str, location_id: int
|
||||
) -> Optional[UnifiedLocation]:
|
||||
"""
|
||||
Get detailed information for a specific location.
|
||||
|
||||
Args:
|
||||
location_type: Type of location (park, ride, company)
|
||||
location_id: ID of the location
|
||||
|
||||
Returns:
|
||||
UnifiedLocation with full details or None if not found
|
||||
"""
|
||||
try:
|
||||
# Check cache first
|
||||
cache_key = self.cache_service.get_location_detail_cache_key(
|
||||
location_type, location_id
|
||||
)
|
||||
cached_locations = self.cache_service.get_cached_locations(cache_key)
|
||||
if cached_locations:
|
||||
return cached_locations[0] if cached_locations else None
|
||||
|
||||
# Get from database
|
||||
location_type_enum = LocationType(location_type.lower())
|
||||
location = self.location_layer.get_location_by_id(
|
||||
location_type_enum, location_id
|
||||
)
|
||||
|
||||
# Cache the result
|
||||
if location:
|
||||
self.cache_service.cache_locations(
|
||||
cache_key,
|
||||
[location],
|
||||
self.cache_service.LOCATION_DETAIL_TTL,
|
||||
)
|
||||
|
||||
return location
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error getting location details: {e}")
|
||||
return None
|
||||
|
||||
def search_locations(
|
||||
self,
|
||||
query: str,
|
||||
bounds: Optional[GeoBounds] = None,
|
||||
location_types: Optional[Set[LocationType]] = None,
|
||||
limit: int = 50,
|
||||
) -> List[UnifiedLocation]:
|
||||
"""
|
||||
Search locations with text query.
|
||||
|
||||
Args:
|
||||
query: Search query string
|
||||
bounds: Optional geographic bounds to search within
|
||||
location_types: Optional set of location types to search
|
||||
limit: Maximum number of results
|
||||
|
||||
Returns:
|
||||
List of matching UnifiedLocation objects
|
||||
"""
|
||||
try:
|
||||
# Create search filters
|
||||
filters = MapFilters(
|
||||
search_query=query,
|
||||
location_types=location_types or {LocationType.PARK, LocationType.RIDE},
|
||||
has_coordinates=True,
|
||||
)
|
||||
|
||||
# Get locations
|
||||
locations = self.location_layer.get_all_locations(bounds, filters)
|
||||
|
||||
# Apply limit
|
||||
return locations[:limit]
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error searching locations: {e}")
|
||||
return []
|
||||
|
||||
def get_locations_by_bounds(
|
||||
self,
|
||||
north: float,
|
||||
south: float,
|
||||
east: float,
|
||||
west: float,
|
||||
location_types: Optional[Set[LocationType]] = None,
|
||||
zoom_level: int = DEFAULT_ZOOM_LEVEL,
|
||||
) -> MapResponse:
|
||||
"""
|
||||
Get locations within specific geographic bounds.
|
||||
|
||||
Args:
|
||||
north, south, east, west: Bounding box coordinates
|
||||
location_types: Optional filter for location types
|
||||
zoom_level: Map zoom level for optimization
|
||||
|
||||
Returns:
|
||||
MapResponse with locations in bounds
|
||||
"""
|
||||
try:
|
||||
bounds = GeoBounds(north=north, south=south, east=east, west=west)
|
||||
filters = (
|
||||
MapFilters(location_types=location_types) if location_types else None
|
||||
)
|
||||
|
||||
return self.get_map_data(
|
||||
bounds=bounds, filters=filters, zoom_level=zoom_level
|
||||
)
|
||||
|
||||
except ValueError:
|
||||
# Invalid bounds
|
||||
return MapResponse(
|
||||
locations=[], clusters=[], total_count=0, filtered_count=0
|
||||
)
|
||||
|
||||
def get_clustered_locations(
|
||||
self,
|
||||
zoom_level: int,
|
||||
bounds: Optional[GeoBounds] = None,
|
||||
filters: Optional[MapFilters] = None,
|
||||
) -> MapResponse:
|
||||
"""
|
||||
Get clustered location data for map display.
|
||||
|
||||
Args:
|
||||
zoom_level: Map zoom level for clustering configuration
|
||||
bounds: Optional geographic bounds
|
||||
filters: Optional filtering criteria
|
||||
|
||||
Returns:
|
||||
MapResponse with clustered data
|
||||
"""
|
||||
return self.get_map_data(
|
||||
bounds=bounds, filters=filters, zoom_level=zoom_level, cluster=True
|
||||
)
|
||||
|
||||
def get_locations_by_type(
|
||||
self,
|
||||
location_type: LocationType,
|
||||
bounds: Optional[GeoBounds] = None,
|
||||
limit: Optional[int] = None,
|
||||
) -> List[UnifiedLocation]:
|
||||
"""
|
||||
Get locations of a specific type.
|
||||
|
||||
Args:
|
||||
location_type: Type of locations to retrieve
|
||||
bounds: Optional geographic bounds
|
||||
limit: Optional limit on results
|
||||
|
||||
Returns:
|
||||
List of UnifiedLocation objects
|
||||
"""
|
||||
try:
|
||||
filters = MapFilters(location_types={location_type})
|
||||
locations = self.location_layer.get_locations_by_type(
|
||||
location_type, bounds, filters
|
||||
)
|
||||
|
||||
if limit:
|
||||
locations = locations[:limit]
|
||||
|
||||
return locations
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error getting locations by type: {e}")
|
||||
return []
|
||||
|
||||
def invalidate_cache(
|
||||
self,
|
||||
location_type: Optional[str] = None,
|
||||
location_id: Optional[int] = None,
|
||||
bounds: Optional[GeoBounds] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Invalidate cached map data.
|
||||
|
||||
Args:
|
||||
location_type: Optional specific location type to invalidate
|
||||
location_id: Optional specific location ID to invalidate
|
||||
bounds: Optional specific bounds to invalidate
|
||||
"""
|
||||
if location_type and location_id:
|
||||
self.cache_service.invalidate_location_cache(location_type, location_id)
|
||||
elif bounds:
|
||||
self.cache_service.invalidate_bounds_cache(bounds)
|
||||
else:
|
||||
self.cache_service.clear_all_map_cache()
|
||||
|
||||
def get_service_stats(self) -> Dict[str, Any]:
|
||||
"""Get service performance and usage statistics."""
|
||||
cache_stats = self.cache_service.get_cache_stats()
|
||||
|
||||
return {
|
||||
"cache_performance": cache_stats,
|
||||
"clustering_available": True,
|
||||
"supported_location_types": [t.value for t in LocationType],
|
||||
"max_unclustered_points": self.MAX_UNCLUSTERED_POINTS,
|
||||
"max_clustered_points": self.MAX_CLUSTERED_POINTS,
|
||||
"service_version": "1.0.0",
|
||||
}
|
||||
|
||||
def _get_locations_from_db(
|
||||
self, bounds: Optional[GeoBounds], filters: Optional[MapFilters]
|
||||
) -> List[UnifiedLocation]:
|
||||
"""Get locations from database using the abstraction layer."""
|
||||
return self.location_layer.get_all_locations(bounds, filters)
|
||||
|
||||
def _apply_smart_limiting(
|
||||
self,
|
||||
locations: List[UnifiedLocation],
|
||||
bounds: Optional[GeoBounds],
|
||||
zoom_level: int,
|
||||
) -> List[UnifiedLocation]:
|
||||
"""Apply intelligent limiting based on zoom level and density."""
|
||||
if zoom_level < 6: # Very zoomed out - show only major parks
|
||||
major_parks = [
|
||||
loc
|
||||
for loc in locations
|
||||
if (
|
||||
loc.type == LocationType.PARK
|
||||
and loc.cluster_category in ["major_park", "theme_park"]
|
||||
)
|
||||
]
|
||||
return major_parks[:200]
|
||||
elif zoom_level < 10: # Regional level
|
||||
return locations[:1000]
|
||||
else: # City level and closer
|
||||
return locations[: self.MAX_CLUSTERED_POINTS]
|
||||
|
||||
def _calculate_response_bounds(
|
||||
self,
|
||||
locations: List[UnifiedLocation],
|
||||
clusters: List[ClusterData],
|
||||
request_bounds: Optional[GeoBounds],
|
||||
) -> Optional[GeoBounds]:
|
||||
"""Calculate the actual bounds of the response data."""
|
||||
if request_bounds:
|
||||
return request_bounds
|
||||
|
||||
all_coords = []
|
||||
|
||||
# Add location coordinates
|
||||
for loc in locations:
|
||||
all_coords.append((loc.latitude, loc.longitude))
|
||||
|
||||
# Add cluster coordinates
|
||||
for cluster in clusters:
|
||||
all_coords.append(cluster.coordinates)
|
||||
|
||||
if not all_coords:
|
||||
return None
|
||||
|
||||
lats, lngs = zip(*all_coords)
|
||||
return GeoBounds(
|
||||
north=max(lats), south=min(lats), east=max(lngs), west=min(lngs)
|
||||
)
|
||||
|
||||
def _get_applied_filters_list(self, filters: Optional[MapFilters]) -> List[str]:
|
||||
"""Get list of applied filter types for metadata."""
|
||||
if not filters:
|
||||
return []
|
||||
|
||||
applied = []
|
||||
if filters.location_types:
|
||||
applied.append("location_types")
|
||||
if filters.search_query:
|
||||
applied.append("search_query")
|
||||
if filters.park_status:
|
||||
applied.append("park_status")
|
||||
if filters.ride_types:
|
||||
applied.append("ride_types")
|
||||
if filters.company_roles:
|
||||
applied.append("company_roles")
|
||||
if filters.min_rating:
|
||||
applied.append("min_rating")
|
||||
if filters.country:
|
||||
applied.append("country")
|
||||
if filters.state:
|
||||
applied.append("state")
|
||||
if filters.city:
|
||||
applied.append("city")
|
||||
|
||||
return applied
|
||||
|
||||
def _generate_cache_key(
|
||||
self,
|
||||
bounds: Optional[GeoBounds],
|
||||
filters: Optional[MapFilters],
|
||||
zoom_level: int,
|
||||
cluster: bool,
|
||||
) -> str:
|
||||
"""Generate cache key for the request."""
|
||||
if cluster:
|
||||
return self.cache_service.get_clusters_cache_key(
|
||||
bounds, filters, zoom_level
|
||||
)
|
||||
else:
|
||||
return self.cache_service.get_locations_cache_key(
|
||||
bounds, filters, zoom_level
|
||||
)
|
||||
|
||||
def _record_performance_metrics(
|
||||
self,
|
||||
start_time: float,
|
||||
initial_query_count: int,
|
||||
cache_hit: bool,
|
||||
result_count: int,
|
||||
bounds_used: bool,
|
||||
clustering_used: bool,
|
||||
) -> None:
|
||||
"""Record performance metrics for monitoring."""
|
||||
query_time_ms = int((time.time() - start_time) * 1000)
|
||||
db_query_count = len(connection.queries) - initial_query_count
|
||||
|
||||
metrics = QueryPerformanceMetrics(
|
||||
query_time_ms=query_time_ms,
|
||||
db_query_count=db_query_count,
|
||||
cache_hit=cache_hit,
|
||||
result_count=result_count,
|
||||
bounds_used=bounds_used,
|
||||
clustering_used=clustering_used,
|
||||
)
|
||||
|
||||
self.cache_service.record_performance_metrics(metrics)
|
||||
|
||||
|
||||
# Global service instance
|
||||
unified_map_service = UnifiedMapService()
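# Usage sketch: fetch clustered map data for a viewport at city-level zoom via
# the module-level service above. The bounding-box values are arbitrary examples.
def _example_viewport_query() -> MapResponse:
    return unified_map_service.get_map_data(
        bounds=GeoBounds(north=42.0, south=41.0, east=-82.0, west=-83.0),
        filters=MapFilters(location_types={LocationType.PARK, LocationType.RIDE}),
        zoom_level=10,
        cluster=True,
    )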
|
||||
@@ -1,200 +0,0 @@
|
||||
"""
|
||||
Shared media service for ThrillWiki.
|
||||
|
||||
This module provides shared functionality for media upload, storage, and processing
|
||||
that can be used across all domain-specific media implementations.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any, Optional, Dict
|
||||
from datetime import datetime
|
||||
from django.core.files.uploadedfile import UploadedFile
|
||||
from django.conf import settings
|
||||
from PIL import Image, ExifTags
|
||||
import os
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MediaService:
|
||||
"""Shared service for media upload and processing operations."""
|
||||
|
||||
@staticmethod
|
||||
def generate_upload_path(
|
||||
domain: str, identifier: str, filename: str, subdirectory: Optional[str] = None
|
||||
) -> str:
|
||||
"""
|
||||
Generate standardized upload path for media files.
|
||||
|
||||
Args:
|
||||
domain: Domain type (e.g., 'park', 'ride')
|
||||
identifier: Object identifier (slug or id)
|
||||
filename: Original filename
|
||||
subdirectory: Optional subdirectory for organization
|
||||
|
||||
Returns:
|
||||
Standardized upload path
|
||||
"""
|
||||
# Always use .jpg extension for consistency
|
||||
base_filename = f"{identifier}.jpg"
|
||||
|
||||
if subdirectory:
|
||||
return f"{domain}/{subdirectory}/{identifier}/{base_filename}"
|
||||
else:
|
||||
return f"{domain}/{identifier}/{base_filename}"
|
||||
|
||||
@staticmethod
|
||||
def extract_exif_date(image_file: UploadedFile) -> Optional[datetime]:
|
||||
"""
|
||||
Extract the date taken from image EXIF data.
|
||||
|
||||
Args:
|
||||
image_file: Uploaded image file
|
||||
|
||||
Returns:
|
||||
DateTime when photo was taken, or None if not available
|
||||
"""
|
||||
try:
|
||||
with Image.open(image_file) as img:
|
||||
exif = img.getexif()
if exif:
    # DateTimeOriginal lives in the Exif sub-IFD rather than the base IFD
    # returned by getexif(), so read it from there and fall back to the base
    # DateTime tag (assumes Pillow >= 9.4 for the ExifTags enums).
    exif_ifd = exif.get_ifd(ExifTags.IFD.Exif)
    date_str = exif_ifd.get(ExifTags.Base.DateTimeOriginal) or exif.get(
        ExifTags.Base.DateTime
    )
    if date_str:
        # EXIF dates are typically in format: '2024:02:15 14:30:00'
        return datetime.strptime(date_str, "%Y:%m:%d %H:%M:%S")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to extract EXIF date: {str(e)}")
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def validate_image_file(image_file: UploadedFile) -> tuple[bool, Optional[str]]:
|
||||
"""
|
||||
Validate uploaded image file.
|
||||
|
||||
Args:
|
||||
image_file: Uploaded image file
|
||||
|
||||
Returns:
|
||||
Tuple of (is_valid, error_message)
|
||||
"""
|
||||
try:
|
||||
# Check file size
|
||||
max_size = getattr(
|
||||
settings, "MAX_PHOTO_SIZE", 10 * 1024 * 1024
|
||||
) # 10MB default
|
||||
if image_file.size > max_size:
|
||||
return (
|
||||
False,
|
||||
f"File size too large. Maximum size is {max_size // (1024 * 1024)}MB",
|
||||
)
|
||||
|
||||
# Check file type
|
||||
allowed_types = getattr(
|
||||
settings,
|
||||
"ALLOWED_PHOTO_TYPES",
|
||||
["image/jpeg", "image/png", "image/webp"],
|
||||
)
|
||||
if image_file.content_type not in allowed_types:
|
||||
return (
|
||||
False,
|
||||
f"File type not allowed. Allowed types: {', '.join(allowed_types)}",
|
||||
)
|
||||
|
||||
# Try to open with PIL to validate it's a real image
|
||||
with Image.open(image_file) as img:
|
||||
img.verify()
|
||||
|
||||
return True, None
|
||||
|
||||
except Exception as e:
|
||||
return False, f"Invalid image file: {str(e)}"
|
||||
|
||||
@staticmethod
|
||||
def process_image(
|
||||
image_file: UploadedFile,
|
||||
max_width: int = 1920,
|
||||
max_height: int = 1080,
|
||||
quality: int = 85,
|
||||
) -> UploadedFile:
|
||||
"""
|
||||
Process and optimize image file.
|
||||
|
||||
Args:
|
||||
image_file: Original uploaded file
|
||||
max_width: Maximum width for resizing
|
||||
max_height: Maximum height for resizing
|
||||
quality: JPEG quality (1-100)
|
||||
|
||||
Returns:
|
||||
Processed image file
|
||||
"""
|
||||
try:
|
||||
with Image.open(image_file) as img:
|
||||
# Convert to RGB if necessary
|
||||
if img.mode in ("RGBA", "LA", "P"):
|
||||
img = img.convert("RGB")
|
||||
|
||||
# Resize if necessary
|
||||
if img.width > max_width or img.height > max_height:
|
||||
img.thumbnail((max_width, max_height), Image.Resampling.LANCZOS)
|
||||
|
||||
# Save processed image
|
||||
from io import BytesIO
|
||||
from django.core.files.uploadedfile import InMemoryUploadedFile
|
||||
|
||||
output = BytesIO()
|
||||
img.save(output, format="JPEG", quality=quality, optimize=True)
|
||||
output.seek(0)
|
||||
|
||||
return InMemoryUploadedFile(
|
||||
output,
|
||||
"ImageField",
|
||||
f"{os.path.splitext(image_file.name)[0]}.jpg",
|
||||
"image/jpeg",
|
||||
output.getbuffer().nbytes,
|
||||
None,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to process image, using original: {str(e)}")
|
||||
return image_file
|
||||
|
||||
@staticmethod
|
||||
def generate_default_caption(username: str) -> str:
|
||||
"""
|
||||
Generate default caption for uploaded photos.
|
||||
|
||||
Args:
|
||||
username: Username of uploader
|
||||
|
||||
Returns:
|
||||
Default caption string
|
||||
"""
|
||||
from django.utils import timezone
|
||||
|
||||
current_time = timezone.now()
|
||||
return f"Uploaded by {username} on {current_time.strftime('%B %d, %Y at %I:%M %p')}"
|
||||
|
||||
@staticmethod
|
||||
def get_storage_stats() -> Dict[str, Any]:
|
||||
"""
|
||||
Get media storage statistics.
|
||||
|
||||
Returns:
|
||||
Dictionary with storage statistics
|
||||
"""
|
||||
try:
|
||||
# This would need to be implemented based on your storage backend
|
||||
return {
|
||||
"total_files": 0,
|
||||
"total_size_bytes": 0,
|
||||
"storage_backend": "default",
|
||||
"available_space": "unknown",
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get storage stats: {str(e)}")
|
||||
return {"error": str(e)}
|
||||
@@ -1,148 +0,0 @@
|
||||
"""
|
||||
Media URL service for generating friendly URLs.
|
||||
|
||||
This service provides utilities for generating SEO-friendly URLs for media files
|
||||
while maintaining compatibility with Cloudflare Images.
|
||||
"""
|
||||
|
||||
import re
|
||||
from typing import Optional, Dict, Any
|
||||
from django.utils.text import slugify
|
||||
|
||||
|
||||
class MediaURLService:
|
||||
"""Service for generating and parsing friendly media URLs."""
|
||||
|
||||
@staticmethod
|
||||
def generate_friendly_filename(caption: str, photo_id: int, extension: str = "jpg") -> str:
|
||||
"""
|
||||
Generate a friendly filename from photo caption and ID.
|
||||
|
||||
Args:
|
||||
caption: Photo caption
|
||||
photo_id: Photo database ID
|
||||
extension: File extension (default: jpg)
|
||||
|
||||
Returns:
|
||||
Friendly filename like "beautiful-park-entrance-123.jpg"
|
||||
"""
|
||||
if caption:
|
||||
# Clean and slugify the caption
|
||||
slug = slugify(caption)
|
||||
# Limit length to avoid overly long URLs
|
||||
if len(slug) > 50:
|
||||
slug = slug[:50].rsplit('-', 1)[0] # Cut at word boundary
|
||||
return f"{slug}-{photo_id}.{extension}"
|
||||
else:
|
||||
return f"photo-{photo_id}.{extension}"
|
||||
|
||||
    @staticmethod
    def generate_park_photo_url(park_slug: str, caption: str, photo_id: int, variant: str = "public") -> str:
        """
        Generate a friendly URL for a park photo.

        Args:
            park_slug: Park slug
            caption: Photo caption
            photo_id: Photo database ID
            variant: Image variant (public, thumbnail, medium, large)

        Returns:
            Friendly URL like "/parks/cedar-point/photos/beautiful-entrance-123.jpg"
        """
        filename = MediaURLService.generate_friendly_filename(caption, photo_id)

        # Add variant to filename if not public
        if variant != "public":
            name, ext = filename.rsplit('.', 1)
            filename = f"{name}-{variant}.{ext}"

        return f"/parks/{park_slug}/photos/{filename}"

    @staticmethod
    def generate_ride_photo_url(park_slug: str, ride_slug: str, caption: str, photo_id: int, variant: str = "public") -> str:
        """
        Generate a friendly URL for a ride photo.

        Args:
            park_slug: Park slug
            ride_slug: Ride slug
            caption: Photo caption
            photo_id: Photo database ID
            variant: Image variant

        Returns:
            Friendly URL like "/parks/cedar-point/rides/millennium-force/photos/first-drop-456.jpg"
        """
        filename = MediaURLService.generate_friendly_filename(caption, photo_id)

        if variant != "public":
            name, ext = filename.rsplit('.', 1)
            filename = f"{name}-{variant}.{ext}"

        return f"/parks/{park_slug}/rides/{ride_slug}/photos/{filename}"

    @staticmethod
    def parse_photo_filename(filename: str) -> Optional[Dict[str, Any]]:
        """
        Parse a friendly filename to extract photo ID and variant.

        Args:
            filename: Filename like "beautiful-entrance-123-thumbnail.jpg"

        Returns:
            Dict with photo_id and variant, or None if parsing fails
        """
        # Remove extension
        name = filename.rsplit('.', 1)[0]

        # Check for variant suffix
        variant = "public"
        variant_patterns = ["thumbnail", "medium", "large"]

        for v in variant_patterns:
            if name.endswith(f"-{v}"):
                variant = v
                name = name[:-len(f"-{v}")]
                break

        # Extract photo ID (should be the last number)
        match = re.search(r'-(\d+)$', name)
        if match:
            photo_id = int(match.group(1))
            return {
                "photo_id": photo_id,
                "variant": variant
            }

        return None

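    # Illustrative round trip (added for clarity, not part of the original
    # module), assuming a hypothetical photo with caption "First Drop" and ID 456:
    #   generate_park_photo_url("cedar-point", "First Drop", 456, variant="thumbnail")
    #       -> "/parks/cedar-point/photos/first-drop-456-thumbnail.jpg"
    #   parse_photo_filename("first-drop-456-thumbnail.jpg")
    #       -> {"photo_id": 456, "variant": "thumbnail"}
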
    @staticmethod
    def get_cloudflare_url_with_fallback(cloudflare_image, variant: str = "public") -> Optional[str]:
        """
        Get Cloudflare URL with fallback handling.

        Args:
            cloudflare_image: CloudflareImage instance
            variant: Desired variant

        Returns:
            Cloudflare URL or None
        """
        if not cloudflare_image:
            return None

        try:
            # Try the specific variant first
            url = cloudflare_image.get_url(variant)
            if url:
                return url

            # Fallback to public URL
            if variant != "public":
                return cloudflare_image.public_url

        except Exception:
            pass

        return None
@@ -1,407 +0,0 @@
"""
Performance monitoring utilities and context managers.
"""

import time
import logging
from contextlib import contextmanager
from functools import wraps
from typing import Optional, Dict, Any, List
from django.db import connection
from django.conf import settings
from django.utils import timezone

logger = logging.getLogger("performance")


@contextmanager
def monitor_performance(operation_name: str, **tags):
    """Context manager for monitoring operation performance"""
    start_time = time.time()
    initial_queries = len(connection.queries)

    # Create performance context
    performance_context = {
        "operation": operation_name,
        "start_time": start_time,
        "timestamp": timezone.now().isoformat(),
        **tags,
    }

    try:
        yield performance_context
    except Exception as e:
        performance_context["error"] = str(e)
        performance_context["status"] = "error"
        raise
    else:
        performance_context["status"] = "success"
    finally:
        end_time = time.time()
        duration = end_time - start_time
        total_queries = len(connection.queries) - initial_queries

        # Update performance context with final metrics
        performance_context.update(
            {
                "duration_seconds": duration,
                "duration_ms": round(duration * 1000, 2),
                "query_count": total_queries,
                "end_time": end_time,
            }
        )

        # Log performance data
        log_level = (
            logging.WARNING if duration > 2.0 or total_queries > 10 else logging.INFO
        )
        logger.log(
            log_level,
            f"Performance: {operation_name} completed in {duration:.3f}s "
            f"with {total_queries} queries",
            extra=performance_context,
        )

        # Log slow operations with additional detail
        if duration > 2.0:
            logger.warning(
                f"Slow operation detected: {operation_name} took {duration:.3f}s",
                extra={
                    "slow_operation": True,
                    "threshold_exceeded": "duration",
                    **performance_context,
                },
            )

        if total_queries > 10:
            logger.warning(
                f"High query count: {operation_name} executed {total_queries} queries",
                extra={
                    "high_query_count": True,
                    "threshold_exceeded": "query_count",
                    **performance_context,
                },
            )

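# Illustrative usage sketch (not part of the original module). Any view or
# service code can wrap a block to get timing and query counts logged:
#
#     with monitor_performance("park_detail", park_id=42) as ctx:
#         do_expensive_work()  # hypothetical helper
#     # ctx now contains duration_ms, query_count, status and the tags.
#
# "park_detail", park_id and do_expensive_work are illustrative names only.
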
@contextmanager
def track_queries(operation_name: str, warn_threshold: int = 10):
    """Context manager to track database queries for specific operations"""
    if not settings.DEBUG:
        yield
        return

    initial_queries = len(connection.queries)
    start_time = time.time()

    try:
        yield
    finally:
        end_time = time.time()
        total_queries = len(connection.queries) - initial_queries
        execution_time = end_time - start_time

        query_details = []
        if hasattr(connection, "queries") and total_queries > 0:
            recent_queries = connection.queries[-total_queries:]
            query_details = [
                {
                    "sql": (
                        query["sql"][:200] + "..."
                        if len(query["sql"]) > 200
                        else query["sql"]
                    ),
                    "time": float(query["time"]),
                }
                for query in recent_queries
            ]

        performance_data = {
            "operation": operation_name,
            "query_count": total_queries,
            "execution_time": execution_time,
            "queries": query_details if settings.DEBUG else [],
        }

        if total_queries > warn_threshold or execution_time > 1.0:
            logger.warning(
                f"Performance concern in {operation_name}: "
                f"{total_queries} queries, {execution_time:.2f}s",
                extra=performance_data,
            )
        else:
            logger.debug(
                f"Query tracking for {operation_name}: "
                f"{total_queries} queries, {execution_time:.2f}s",
                extra=performance_data,
            )

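# Illustrative usage sketch (not part of the original module). Because the
# manager yields immediately when settings.DEBUG is False, it is effectively a
# development-only guard against query explosions:
#
#     with track_queries("serialize_trending_payload", warn_threshold=5):
#         payload = build_payload()  # hypothetical helper
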
class PerformanceProfiler:
    """Advanced performance profiling with detailed metrics"""

    def __init__(self, name: str):
        self.name = name
        self.start_time = None
        self.end_time = None
        self.checkpoints = []
        self.initial_queries = 0
        self.memory_usage = {}

    def start(self):
        """Start profiling"""
        self.start_time = time.time()
        self.initial_queries = len(connection.queries)

        # Track memory usage if psutil is available
        try:
            import psutil

            process = psutil.Process()
            self.memory_usage["start"] = process.memory_info().rss
        except ImportError:
            pass

        logger.debug(f"Started profiling: {self.name}")

    def checkpoint(self, name: str):
        """Add a checkpoint"""
        if self.start_time is None:
            logger.warning(f"Checkpoint '{name}' called before profiling started")
            return

        current_time = time.time()
        elapsed = current_time - self.start_time
        queries_since_start = len(connection.queries) - self.initial_queries

        checkpoint = {
            "name": name,
            "timestamp": current_time,
            "elapsed_seconds": elapsed,
            "queries_since_start": queries_since_start,
        }

        # Memory usage if available
        try:
            import psutil

            process = psutil.Process()
            checkpoint["memory_rss"] = process.memory_info().rss
        except ImportError:
            pass

        self.checkpoints.append(checkpoint)
        logger.debug(f"Checkpoint '{name}' at {elapsed:.3f}s")

    def stop(self):
        """Stop profiling and log results"""
        if self.start_time is None:
            logger.warning("Profiling stopped before it was started")
            return

        self.end_time = time.time()
        total_duration = self.end_time - self.start_time
        total_queries = len(connection.queries) - self.initial_queries

        # Final memory usage
        try:
            import psutil

            process = psutil.Process()
            self.memory_usage["end"] = process.memory_info().rss
        except ImportError:
            pass

        # Create detailed profiling report
        report = {
            "profiler_name": self.name,
            "total_duration": total_duration,
            "total_queries": total_queries,
            "checkpoints": self.checkpoints,
            "memory_usage": self.memory_usage,
            "queries_per_second": (
                total_queries / total_duration if total_duration > 0 else 0
            ),
        }

        # Calculate checkpoint intervals
        if len(self.checkpoints) > 1:
            intervals = []
            for i in range(1, len(self.checkpoints)):
                prev = self.checkpoints[i - 1]
                curr = self.checkpoints[i]
                intervals.append(
                    {
                        "from": prev["name"],
                        "to": curr["name"],
                        "duration": curr["elapsed_seconds"] - prev["elapsed_seconds"],
                        "queries": curr["queries_since_start"]
                        - prev["queries_since_start"],
                    }
                )
            report["checkpoint_intervals"] = intervals

        # Log the complete report
        log_level = logging.WARNING if total_duration > 1.0 else logging.INFO
        logger.log(
            log_level,
            f"Profiling complete: {self.name} took {total_duration:.3f}s "
            f"with {total_queries} queries",
            extra=report,
        )

        return report


@contextmanager
def profile_operation(name: str):
    """Context manager for detailed operation profiling"""
    profiler = PerformanceProfiler(name)
    profiler.start()

    try:
        yield profiler
    finally:
        profiler.stop()

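# Illustrative usage sketch (not part of the original module), combining the
# context manager with named checkpoints:
#
#     with profile_operation("rebuild_map_cache") as profiler:
#         load_locations()          # hypothetical step
#         profiler.checkpoint("locations_loaded")
#         build_clusters()          # hypothetical step
#         profiler.checkpoint("clusters_built")
#     # profiler.stop() runs automatically and logs checkpoint intervals.
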
class DatabaseQueryAnalyzer:
    """Analyze database query patterns and performance"""

    @staticmethod
    def analyze_queries(queries: List[Dict]) -> Dict[str, Any]:
        """Analyze a list of queries for patterns and issues"""
        if not queries:
            return {}

        total_time = sum(float(q.get("time", 0)) for q in queries)
        query_count = len(queries)

        # Group queries by type
        query_types = {}
        for query in queries:
            sql = query.get("sql", "").strip().upper()
            query_type = sql.split()[0] if sql else "UNKNOWN"
            query_types[query_type] = query_types.get(query_type, 0) + 1

        # Find slow queries (top 10% by time)
        sorted_queries = sorted(
            queries, key=lambda q: float(q.get("time", 0)), reverse=True
        )
        slow_query_count = max(1, query_count // 10)
        slow_queries = sorted_queries[:slow_query_count]

        # Detect duplicate queries
        query_signatures = {}
        for query in queries:
            # Simplified signature - remove literals and normalize whitespace
            sql = query.get("sql", "")
            signature = " ".join(sql.split())  # Normalize whitespace
            query_signatures[signature] = query_signatures.get(signature, 0) + 1

        duplicates = {
            sig: count for sig, count in query_signatures.items() if count > 1
        }

        analysis = {
            "total_queries": query_count,
            "total_time": total_time,
            "average_time": total_time / query_count if query_count > 0 else 0,
            "query_types": query_types,
            "slow_queries": [
                {
                    "sql": (
                        q.get("sql", "")[:200] + "..."
                        if len(q.get("sql", "")) > 200
                        else q.get("sql", "")
                    ),
                    "time": float(q.get("time", 0)),
                }
                for q in slow_queries
            ],
            "duplicate_query_count": len(duplicates),
            "duplicate_queries": (
                duplicates
                if len(duplicates) <= 10
                else dict(list(duplicates.items())[:10])
            ),
        }

        return analysis

    @classmethod
    def analyze_current_queries(cls) -> Dict[str, Any]:
        """Analyze the current request's queries"""
        if hasattr(connection, "queries"):
            return cls.analyze_queries(connection.queries)
        return {}

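# Illustrative usage sketch (not part of the original module). With DEBUG on,
# connection.queries is populated, so a request handler could log a summary:
#
#     summary = DatabaseQueryAnalyzer.analyze_current_queries()
#     logger.debug("query analysis", extra=summary)
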
# Performance monitoring decorators
def monitor_function_performance(operation_name: Optional[str] = None):
    """Decorator to monitor function performance"""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            name = operation_name or f"{func.__module__}.{func.__name__}"
            with monitor_performance(
                name, function=func.__name__, module=func.__module__
            ):
                return func(*args, **kwargs)

        return wrapper

    return decorator


def track_database_queries(warn_threshold: int = 10):
    """Decorator to track database queries for a function"""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            operation_name = f"{func.__module__}.{func.__name__}"
            with track_queries(operation_name, warn_threshold):
                return func(*args, **kwargs)

        return wrapper

    return decorator

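# Illustrative usage sketch (not part of the original module):
#
#     @monitor_function_performance("trending.calculate")
#     @track_database_queries(warn_threshold=15)
#     def refresh_trending():   # hypothetical function
#         ...
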
# Performance metrics collection
class PerformanceMetrics:
    """Collect and aggregate performance metrics"""

    def __init__(self):
        self.metrics = []

    def record_metric(self, name: str, value: float, tags: Optional[Dict] = None):
        """Record a performance metric"""
        metric = {
            "name": name,
            "value": value,
            "timestamp": timezone.now().isoformat(),
            "tags": tags or {},
        }
        self.metrics.append(metric)

        # Log the metric
        logger.info(f"Performance metric: {name} = {value}", extra=metric)

    def get_metrics(self, name: Optional[str] = None) -> List[Dict]:
        """Get recorded metrics, optionally filtered by name"""
        if name:
            return [m for m in self.metrics if m["name"] == name]
        return self.metrics.copy()

    def clear_metrics(self):
        """Clear all recorded metrics"""
        self.metrics.clear()


# Global performance metrics instance
performance_metrics = PerformanceMetrics()
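
# Illustrative usage sketch (not part of the original module):
#
#     performance_metrics.record_metric("map.cluster_time_ms", 42.5, tags={"zoom": 10})
#     recent = performance_metrics.get_metrics("map.cluster_time_ms")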
@@ -1,725 +0,0 @@
"""
Trending Service for calculating and caching trending content.

This service implements the weighted trending algorithm that combines:
- View growth rates
- Content ratings
- Recency factors
- Popularity metrics

Results are cached in Redis for performance optimization.
"""

import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.db.models import Q

from apps.core.analytics import PageView
from apps.parks.models import Park
from apps.rides.models import Ride

logger = logging.getLogger(__name__)


class TrendingService:
    """
    Service for calculating trending content using weighted algorithm.

    Algorithm Components:
    - View Growth Rate (40% weight): Recent view increase vs historical
    - Rating Score (30% weight): Average user rating normalized
    - Recency Factor (20% weight): How recently content was added/updated
    - Popularity Boost (10% weight): Total view count normalization
    """

    # Algorithm weights (must sum to 1.0)
    WEIGHT_VIEW_GROWTH = 0.4
    WEIGHT_RATING = 0.3
    WEIGHT_RECENCY = 0.2
    WEIGHT_POPULARITY = 0.1

    # Cache configuration
    CACHE_PREFIX = "trending"
    CACHE_TTL = 86400  # 24 hours (daily refresh)

    # Time windows for calculations
    CURRENT_PERIOD_HOURS = 168  # 7 days
    PREVIOUS_PERIOD_HOURS = 336  # 14 days (for previous 7-day window comparison)
    RECENCY_BASELINE_DAYS = 365

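    # Worked example (added for illustration, not part of the original class):
    # with component scores growth=0.50, rating=0.70, recency=0.80 and
    # popularity=0.25, the weighted total is
    # 0.4 * 0.50 + 0.3 * 0.70 + 0.2 * 0.80 + 0.1 * 0.25 = 0.595.
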
    def __init__(self):
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")

    def get_trending_content(
        self, content_type: str = "all", limit: int = 20, force_refresh: bool = False
    ) -> List[Dict[str, Any]]:
        """
        Get trending content using direct calculation.

        Args:
            content_type: 'parks', 'rides', or 'all'
            limit: Maximum number of results
            force_refresh: Skip cache and recalculate

        Returns:
            List of trending content with exact frontend format
        """
        cache_key = f"trending:calculated:{content_type}:{limit}"

        if not force_refresh:
            cached_result = cache.get(cache_key)
            if cached_result is not None:
                self.logger.debug(
                    f"Returning cached trending results for {content_type}"
                )
                return cached_result

        self.logger.info(f"Getting trending content for {content_type}")

        try:
            # Calculate directly without Celery
            trending_items = []

            if content_type in ["all", "parks"]:
                park_items = self._calculate_trending_parks(
                    limit * 2 if content_type == "all" else limit
                )
                trending_items.extend(park_items)

            if content_type in ["all", "rides"]:
                ride_items = self._calculate_trending_rides(
                    limit * 2 if content_type == "all" else limit
                )
                trending_items.extend(ride_items)

            # Sort by trending score and apply limit
            trending_items.sort(key=lambda x: x.get("trending_score", 0), reverse=True)
            trending_items = trending_items[:limit]

            # Format results for API consumption
            formatted_results = self._format_trending_results(trending_items)

            # Cache results
            cache.set(cache_key, formatted_results, self.CACHE_TTL)

            self.logger.info(
                f"Calculated {len(formatted_results)} trending items for {content_type}"
            )
            return formatted_results

        except Exception as e:
            self.logger.error(f"Error getting trending content: {e}", exc_info=True)
            return []

    def get_new_content(
        self,
        content_type: str = "all",
        limit: int = 20,
        days_back: int = 30,
        force_refresh: bool = False,
    ) -> List[Dict[str, Any]]:
        """
        Get recently added content using direct calculation.

        Args:
            content_type: 'parks', 'rides', or 'all'
            limit: Maximum number of results
            days_back: How many days to look back
            force_refresh: Skip cache and recalculate

        Returns:
            List of new content with exact frontend format
        """
        cache_key = f"new_content:calculated:{content_type}:{days_back}:{limit}"

        if not force_refresh:
            cached_result = cache.get(cache_key)
            if cached_result is not None:
                self.logger.debug(
                    f"Returning cached new content results for {content_type}"
                )
                return cached_result

        self.logger.info(f"Getting new content for {content_type}")

        try:
            # Calculate directly without Celery
            cutoff_date = timezone.now() - timedelta(days=days_back)
            new_items = []

            if content_type in ["all", "parks"]:
                parks = self._get_new_parks(
                    cutoff_date, limit * 2 if content_type == "all" else limit
                )
                new_items.extend(parks)

            if content_type in ["all", "rides"]:
                rides = self._get_new_rides(
                    cutoff_date, limit * 2 if content_type == "all" else limit
                )
                new_items.extend(rides)

            # Sort by date added (most recent first) and apply limit
            new_items.sort(key=lambda x: x.get("date_added", ""), reverse=True)
            new_items = new_items[:limit]

            # Format results for API consumption
            formatted_results = self._format_new_content_results(new_items)

            # Cache results
            cache.set(cache_key, formatted_results, 1800)  # Cache for 30 minutes

            self.logger.info(
                f"Calculated {len(formatted_results)} new items for {content_type}"
            )
            return formatted_results

        except Exception as e:
            self.logger.error(f"Error getting new content: {e}", exc_info=True)
            return []

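    # Illustrative usage sketch (not part of the original class):
    #
    #     service = TrendingService()
    #     top_rides = service.get_trending_content(content_type="rides", limit=10)
    #     recent_parks = service.get_new_content(content_type="parks", days_back=60)
    #
    # Both calls read the cache first unless force_refresh=True is passed.
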
    def _calculate_trending_parks(self, limit: int) -> List[Dict[str, Any]]:
        """Calculate trending scores for parks."""
        parks = Park.objects.filter(status="OPERATING").select_related(
            "location", "operator", "card_image"
        )

        trending_parks = []

        for park in parks:
            try:
                score = self._calculate_content_score(park, "park")
                if score > 0:  # Only include items with positive trending scores
                    # Get opening date for date_opened field
                    opening_date = getattr(park, "opening_date", None)
                    if opening_date and isinstance(opening_date, datetime):
                        opening_date = opening_date.date()

                    # Get location fields
                    city = ""
                    state = ""
                    country = ""
                    try:
                        location = getattr(park, "location", None)
                        if location:
                            city = getattr(location, "city", "") or ""
                            state = getattr(location, "state", "") or ""
                            country = getattr(location, "country", "") or ""
                    except Exception:
                        pass

                    # Get card image URL
                    card_image_url = ""
                    if park.card_image and hasattr(park.card_image, "image"):
                        card_image_url = (
                            park.card_image.image.url if park.card_image.image else ""
                        )

                    # Get primary company (operator)
                    primary_company = park.operator.name if park.operator else ""

                    trending_parks.append(
                        {
                            "content_object": park,
                            "content_type": "park",
                            "trending_score": score,
                            "id": park.id,
                            "name": park.name,
                            "slug": park.slug,
                            "park": park.name,  # For parks, park field is the park name itself
                            "category": "park",
                            "rating": (
                                float(park.average_rating)
                                if park.average_rating
                                else 0.0
                            ),
                            "date_opened": (
                                opening_date.isoformat() if opening_date else ""
                            ),
                            "url": park.url,
                            "card_image": card_image_url,
                            "city": city,
                            "state": state,
                            "country": country,
                            "primary_company": primary_company,
                        }
                    )
            except Exception as e:
                self.logger.warning(f"Error calculating score for park {park.id}: {e}")

        return trending_parks

    def _calculate_trending_rides(self, limit: int) -> List[Dict[str, Any]]:
        """Calculate trending scores for rides."""
        rides = Ride.objects.filter(status="OPERATING").select_related(
            "park", "park__location", "card_image"
        )

        trending_rides = []

        for ride in rides:
            try:
                score = self._calculate_content_score(ride, "ride")
                if score > 0:  # Only include items with positive trending scores
                    # Get opening date for date_opened field
                    opening_date = getattr(ride, "opening_date", None)
                    if opening_date and isinstance(opening_date, datetime):
                        opening_date = opening_date.date()

                    # Get card image URL
                    card_image_url = ""
                    if ride.card_image and hasattr(ride.card_image, "image"):
                        card_image_url = (
                            ride.card_image.image.url if ride.card_image.image else ""
                        )

                    trending_rides.append(
                        {
                            "content_object": ride,
                            "content_type": "ride",
                            "trending_score": score,
                            "id": ride.pk,  # Use pk instead of id
                            "name": ride.name,
                            "slug": ride.slug,
                            "park": ride.park.name if ride.park else "",
                            "category": "ride",
                            "rating": (
                                float(ride.average_rating)
                                if ride.average_rating
                                else 0.0
                            ),
                            "date_opened": (
                                opening_date.isoformat() if opening_date else ""
                            ),
                            "url": ride.url,
                            "park_url": ride.park.url if ride.park else "",
                            "card_image": card_image_url,
                        }
                    )
            except Exception as e:
                self.logger.warning(f"Error calculating score for ride {ride.pk}: {e}")

        return trending_rides

    def _calculate_content_score(self, content_obj: Any, content_type: str) -> float:
        """
        Calculate weighted trending score for content object.

        Returns:
            Float between 0.0 and 1.0 representing trending strength
        """
        try:
            # Get content type for PageView queries
            ct = ContentType.objects.get_for_model(content_obj)

            # 1. View Growth Score (40% weight)
            view_growth_score = self._calculate_view_growth_score(ct, content_obj.id)

            # 2. Rating Score (30% weight)
            rating_score = self._calculate_rating_score(content_obj)

            # 3. Recency Score (20% weight)
            recency_score = self._calculate_recency_score(content_obj)

            # 4. Popularity Score (10% weight)
            popularity_score = self._calculate_popularity_score(ct, content_obj.id)

            # Calculate weighted final score
            final_score = (
                view_growth_score * self.WEIGHT_VIEW_GROWTH
                + rating_score * self.WEIGHT_RATING
                + recency_score * self.WEIGHT_RECENCY
                + popularity_score * self.WEIGHT_POPULARITY
            )

            self.logger.debug(
                f"{content_type} {content_obj.id}: "
                f"growth={view_growth_score:.3f}, rating={rating_score:.3f}, "
                f"recency={recency_score:.3f}, popularity={popularity_score:.3f}, "
                f"final={final_score:.3f}"
            )

            return final_score

        except Exception as e:
            self.logger.error(
                f"Error calculating score for {content_type} {content_obj.id}: {e}"
            )
            return 0.0

    def _calculate_view_growth_score(
        self, content_type: ContentType, object_id: int
    ) -> float:
        """Calculate normalized view growth score."""
        try:
            current_views, previous_views, growth_percentage = (
                PageView.get_views_growth(
                    content_type,
                    object_id,
                    self.CURRENT_PERIOD_HOURS,
                    self.PREVIOUS_PERIOD_HOURS,
                )
            )

            if previous_views == 0:
                # New content with views gets boost
                return min(current_views / 100.0, 1.0) if current_views > 0 else 0.0

            # Normalize growth percentage to 0-1 scale
            # 250% growth = 0.5, 500% growth or more = 1.0
            normalized_growth = (
                min(growth_percentage / 500.0, 1.0) if growth_percentage > 0 else 0.0
            )
            return max(normalized_growth, 0.0)

        except Exception as e:
            self.logger.warning(f"Error calculating view growth: {e}")
            return 0.0

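    # Worked example (added for illustration): if PageView.get_views_growth
    # reports 40 previous-window views, 120 current-window views and +200%
    # growth, the score is min(200 / 500.0, 1.0) = 0.4. Brand-new content with
    # no previous views and 50 current views scores min(50 / 100.0, 1.0) = 0.5.
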
    def _calculate_rating_score(self, content_obj: Any) -> float:
        """Calculate normalized rating score."""
        try:
            rating = getattr(content_obj, "average_rating", None)
            if rating is None or rating == 0:
                return 0.3  # Neutral score for unrated content

            # Normalize rating from 1-10 scale to 0-1 scale
            # Rating of 5 ~ 0.44, rating of 8 ~ 0.78, rating of 10 = 1.0
            return min(max((float(rating) - 1) / 9.0, 0.0), 1.0)

        except Exception as e:
            self.logger.warning(f"Error calculating rating score: {e}")
            return 0.3

    def _calculate_recency_score(self, content_obj: Any) -> float:
        """Calculate recency score based on when content was added/updated."""
        try:
            # Use opening_date for parks/rides, or created_at as fallback
            date_added = getattr(content_obj, "opening_date", None)
            if not date_added:
                date_added = getattr(content_obj, "created_at", None)
                if not date_added:
                    return 0.5  # Neutral score for unknown dates

            # Handle both date and datetime objects
            if hasattr(date_added, "date"):
                date_added = date_added.date()

            # Calculate days since added
            today = timezone.now().date()
            days_since_added = (today - date_added).days

            # Recency score: newer content gets higher scores
            # 0 days = 1.0, 30 days = 0.8, 365 days = 0.1, >365 days = 0.0
            if days_since_added <= 0:
                return 1.0
            elif days_since_added <= 30:
                return 1.0 - (days_since_added / 30.0) * 0.2  # 1.0 to 0.8
            elif days_since_added <= self.RECENCY_BASELINE_DAYS:
                return (
                    0.8
                    - ((days_since_added - 30) / (self.RECENCY_BASELINE_DAYS - 30))
                    * 0.7
                )  # 0.8 to 0.1
            else:
                return 0.0

        except Exception as e:
            self.logger.warning(f"Error calculating recency score: {e}")
            return 0.5

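    # Worked example (added for illustration): content opened 120 days ago
    # falls in the 30..365-day band, so the score is
    # 0.8 - ((120 - 30) / (365 - 30)) * 0.7, which is about 0.61.
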
    def _calculate_popularity_score(
        self, content_type: ContentType, object_id: int
    ) -> float:
        """Calculate popularity score based on total view count."""
        try:
            total_views = PageView.get_total_views_count(
                content_type,
                object_id,
                hours=168,  # Last 7 days
            )

            # Normalize views to 0-1 scale
            # 0 views = 0.0, 100 views = 0.5, 1000+ views = 1.0
            if total_views == 0:
                return 0.0
            elif total_views <= 100:
                return total_views / 200.0  # 0.0 to 0.5
            else:
                return min(0.5 + (total_views - 100) / 1800.0, 1.0)  # 0.5 to 1.0

        except Exception as e:
            self.logger.warning(f"Error calculating popularity score: {e}")
            return 0.0

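    # Worked example (added for illustration): 60 views in the last 7 days
    # scores 60 / 200.0 = 0.3; 250 views scores 0.5 + (250 - 100) / 1800.0,
    # which is about 0.58.
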
    def _get_new_parks(self, cutoff_date: datetime, limit: int) -> List[Dict[str, Any]]:
        """Get recently added parks."""
        new_parks = (
            Park.objects.filter(
                Q(created_at__gte=cutoff_date)
                | Q(opening_date__gte=cutoff_date.date()),
                status="OPERATING",
            )
            .select_related("location", "operator", "card_image")
            .order_by("-created_at", "-opening_date")[:limit]
        )

        results = []
        for park in new_parks:
            date_added = park.opening_date or park.created_at
            # Handle datetime to date conversion
            if date_added:
                # If it's a datetime, convert to date
                if isinstance(date_added, datetime):
                    date_added = date_added.date()
                # If it's already a date, keep it as is

            # Get opening date for date_opened field
            opening_date = getattr(park, "opening_date", None)
            if opening_date and isinstance(opening_date, datetime):
                opening_date = opening_date.date()

            # Get location fields
            city = ""
            state = ""
            country = ""
            try:
                location = getattr(park, "location", None)
                if location:
                    city = getattr(location, "city", "") or ""
                    state = getattr(location, "state", "") or ""
                    country = getattr(location, "country", "") or ""
            except Exception:
                pass

            # Get card image URL
            card_image_url = ""
            if park.card_image and hasattr(park.card_image, "image"):
                card_image_url = (
                    park.card_image.image.url if park.card_image.image else ""
                )

            # Get primary company (operator)
            primary_company = park.operator.name if park.operator else ""

            results.append(
                {
                    "content_object": park,
                    "content_type": "park",
                    "id": park.pk,  # Use pk instead of id for Django compatibility
                    "name": park.name,
                    "slug": park.slug,
                    "park": park.name,  # For parks, park field is the park name itself
                    "category": "park",
                    "date_added": date_added.isoformat() if date_added else "",
                    "date_opened": opening_date.isoformat() if opening_date else "",
                    "url": park.url,
                    "card_image": card_image_url,
                    "city": city,
                    "state": state,
                    "country": country,
                    "primary_company": primary_company,
                }
            )

        return results

    def _get_new_rides(self, cutoff_date: datetime, limit: int) -> List[Dict[str, Any]]:
        """Get recently added rides."""
        new_rides = (
            Ride.objects.filter(
                Q(created_at__gte=cutoff_date)
                | Q(opening_date__gte=cutoff_date.date()),
                status="OPERATING",
            )
            .select_related("park", "park__location", "card_image")
            .order_by("-created_at", "-opening_date")[:limit]
        )

        results = []
        for ride in new_rides:
            date_added = getattr(ride, "opening_date", None) or getattr(
                ride, "created_at", None
            )
            # Handle datetime to date conversion
            if date_added:
                # If it's a datetime, convert to date
                if isinstance(date_added, datetime):
                    date_added = date_added.date()
                # If it's already a date, keep it as is

            # Get opening date for date_opened field
            opening_date = getattr(ride, "opening_date", None)
            if opening_date and isinstance(opening_date, datetime):
                opening_date = opening_date.date()

            # Get card image URL
            card_image_url = ""
            if ride.card_image and hasattr(ride.card_image, "image"):
                card_image_url = (
                    ride.card_image.image.url if ride.card_image.image else ""
                )

            results.append(
                {
                    "content_object": ride,
                    "content_type": "ride",
                    "id": ride.pk,  # Use pk instead of id for Django compatibility
                    "name": ride.name,
                    "slug": ride.slug,
                    "park": ride.park.name if ride.park else "",
                    "category": "ride",
                    "date_added": date_added.isoformat() if date_added else "",
                    "date_opened": opening_date.isoformat() if opening_date else "",
                    "url": ride.url,
                    "park_url": ride.park.url if ride.park else "",
                    "card_image": card_image_url,
                }
            )

        return results

    def _format_trending_results(
        self, trending_items: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format trending results for frontend consumption."""
        formatted_results = []

        for rank, item in enumerate(trending_items, 1):
            try:
                # Get view change for display
                content_obj = item["content_object"]
                ct = ContentType.objects.get_for_model(content_obj)
                current_views, previous_views, growth_percentage = (
                    PageView.get_views_growth(
                        ct,
                        content_obj.id,
                        self.CURRENT_PERIOD_HOURS,
                        self.PREVIOUS_PERIOD_HOURS,
                    )
                )

                # Format exactly as frontend expects
                formatted_item = {
                    "id": item["id"],
                    "name": item["name"],
                    "park": item["park"],
                    "category": item["category"],
                    "rating": item["rating"],
                    "rank": rank,
                    "views": current_views,
                    "views_change": (
                        f"+{growth_percentage:.1f}%"
                        if growth_percentage > 0
                        else f"{growth_percentage:.1f}%"
                    ),
                    "slug": item["slug"],
                    "date_opened": item["date_opened"],
                    "url": item["url"],
                }

                # Add card_image for all items
                if item.get("card_image"):
                    formatted_item["card_image"] = item["card_image"]

                # Add park-specific fields
                if item["content_type"] == "park":
                    if item.get("city"):
                        formatted_item["city"] = item["city"]
                    if item.get("state"):
                        formatted_item["state"] = item["state"]
                    if item.get("country"):
                        formatted_item["country"] = item["country"]
                    if item.get("primary_company"):
                        formatted_item["primary_company"] = item["primary_company"]

                # Add park_url for rides
                if item.get("park_url"):
                    formatted_item["park_url"] = item["park_url"]

                formatted_results.append(formatted_item)

            except Exception as e:
                self.logger.warning(f"Error formatting trending item: {e}")

        return formatted_results

    def _format_new_content_results(
        self, new_items: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format new content results for frontend consumption."""
        formatted_results = []

        for item in new_items:
            try:
                # Format exactly as frontend expects
                formatted_item = {
                    "id": item["id"],
                    "name": item["name"],
                    "park": item["park"],
                    "category": item["category"],
                    "date_added": item["date_added"],
                    "date_opened": item["date_opened"],
                    "slug": item["slug"],
                    "url": item["url"],
                }

                # Add card_image for all items
                if item.get("card_image"):
                    formatted_item["card_image"] = item["card_image"]

                # Add park-specific fields
                if item["content_type"] == "park":
                    if item.get("city"):
                        formatted_item["city"] = item["city"]
                    if item.get("state"):
                        formatted_item["state"] = item["state"]
                    if item.get("country"):
                        formatted_item["country"] = item["country"]
                    if item.get("primary_company"):
                        formatted_item["primary_company"] = item["primary_company"]

                # Add park_url for rides
                if item.get("park_url"):
                    formatted_item["park_url"] = item["park_url"]

                formatted_results.append(formatted_item)

            except Exception as e:
                self.logger.warning(f"Error formatting new content item: {e}")

        return formatted_results

    def clear_cache(self, content_type: str = "all") -> None:
        """Clear trending and new content caches."""
        try:
            cache_patterns = [
                f"{self.CACHE_PREFIX}:trending:{content_type}:*",
                f"{self.CACHE_PREFIX}:new:{content_type}:*",
            ]

            if content_type == "all":
                cache_patterns.extend(
                    [
                        f"{self.CACHE_PREFIX}:trending:parks:*",
                        f"{self.CACHE_PREFIX}:trending:rides:*",
                        f"{self.CACHE_PREFIX}:new:parks:*",
                        f"{self.CACHE_PREFIX}:new:rides:*",
                    ]
                )

            # Note: This is a simplified cache clear
            # In production, you might want to use cache.delete_many() or similar
            cache.clear()
            self.logger.info(f"Cleared trending caches for {content_type}")

        except Exception as e:
            self.logger.error(f"Error clearing cache: {e}")


# Singleton service instance
trending_service = TrendingService()
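
# Illustrative usage sketch (not part of the original module). Callers are
# expected to go through the module-level singleton, for example in a view:
#
#     items = trending_service.get_trending_content(content_type="all", limit=20)
#     trending_service.clear_cache("all")  # drop cached results after bulk edits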