mirror of https://github.com/pacnpal/thrillwiki_django_no_react.git
synced 2025-12-20 06:11:07 -05:00

- Added comprehensive documentation for hybrid filtering implementation, including architecture, API endpoints, performance characteristics, and usage examples.
- Developed a hybrid pagination and client-side filtering recommendation, detailing server-side responsibilities and client-side logic.
- Created a test script for hybrid filtering endpoints, covering various test cases including basic filtering, search functionality, pagination, and edge cases.

818 lines · 34 KiB · Python
"""
|
|
Park API views for ThrillWiki API v1.
|
|
|
|
This module contains consolidated park photo viewset for the centralized API structure.
|
|
Enhanced from rogue implementation to maintain full feature parity.
|
|
"""
|
|
|
|
from .serializers import (
|
|
ParkPhotoOutputSerializer,
|
|
ParkPhotoCreateInputSerializer,
|
|
ParkPhotoUpdateInputSerializer,
|
|
ParkPhotoListOutputSerializer,
|
|
ParkPhotoApprovalInputSerializer,
|
|
ParkPhotoStatsOutputSerializer,
|
|
)
|
|
from typing import Any, cast
|
|
import logging
|
|
|
|
from django.core.exceptions import PermissionDenied
|
|
from drf_spectacular.utils import extend_schema_view, extend_schema, OpenApiParameter
|
|
from drf_spectacular.types import OpenApiTypes
|
|
from rest_framework import status
|
|
from rest_framework.decorators import action
|
|
from rest_framework.exceptions import ValidationError
|
|
from rest_framework.permissions import IsAuthenticated
|
|
from rest_framework.response import Response
|
|
from rest_framework.viewsets import ModelViewSet
|
|
|
|
from apps.parks.models import ParkPhoto, Park
|
|
from apps.parks.services import ParkMediaService
|
|
from django.contrib.auth import get_user_model
|
|
|
|
UserModel = get_user_model()
|
|
|
|
logger = logging.getLogger(__name__)


@extend_schema_view(
    list=extend_schema(
        summary="List park photos",
        description="Retrieve a paginated list of park photos with filtering capabilities.",
        responses={200: ParkPhotoListOutputSerializer(many=True)},
        tags=["Park Media"],
    ),
    create=extend_schema(
        summary="Upload park photo",
        description="Upload a new photo for a park. Requires authentication.",
        request=ParkPhotoCreateInputSerializer,
        responses={
            201: ParkPhotoOutputSerializer,
            400: OpenApiTypes.OBJECT,
            401: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    ),
    retrieve=extend_schema(
        summary="Get park photo details",
        description="Retrieve detailed information about a specific park photo.",
        responses={
            200: ParkPhotoOutputSerializer,
            404: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    ),
    update=extend_schema(
        summary="Update park photo",
        description="Update park photo information. Requires authentication and ownership or admin privileges.",
        request=ParkPhotoUpdateInputSerializer,
        responses={
            200: ParkPhotoOutputSerializer,
            400: OpenApiTypes.OBJECT,
            401: OpenApiTypes.OBJECT,
            403: OpenApiTypes.OBJECT,
            404: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    ),
    partial_update=extend_schema(
        summary="Partially update park photo",
        description="Partially update park photo information. Requires authentication and ownership or admin privileges.",
        request=ParkPhotoUpdateInputSerializer,
        responses={
            200: ParkPhotoOutputSerializer,
            400: OpenApiTypes.OBJECT,
            401: OpenApiTypes.OBJECT,
            403: OpenApiTypes.OBJECT,
            404: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    ),
    destroy=extend_schema(
        summary="Delete park photo",
        description="Delete a park photo. Requires authentication and ownership or admin privileges.",
        responses={
            204: None,
            401: OpenApiTypes.OBJECT,
            403: OpenApiTypes.OBJECT,
            404: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    ),
)
class ParkPhotoViewSet(ModelViewSet):
    """
    Enhanced ViewSet for managing park photos with full feature parity.

    Provides CRUD operations for park photos with proper permission checking.
    Uses ParkMediaService for business logic operations.
    Includes advanced features like bulk approval and statistics.
    """

    permission_classes = [IsAuthenticated]
    lookup_field = "id"
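
    # Routing sketch (an assumption, not part of this module): the methods below read
    # a ``park_pk`` URL kwarg, which is what a nested router such as drf-nested-routers
    # provides. Names are illustrative only.
    #
    #   from rest_framework_nested import routers
    #
    #   router = routers.SimpleRouter()
    #   router.register(r"parks", ParkViewSet)  # hypothetical parent viewset
    #   parks_router = routers.NestedSimpleRouter(router, r"parks", lookup="park")
    #   parks_router.register(r"photos", ParkPhotoViewSet, basename="park-photos")
    #
    # With lookup="park", routes receive kwargs["park_pk"], matching get_queryset()
    # and perform_create() below.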

    def get_queryset(self):  # type: ignore[override]
        """Get photos for the current park with optimized queries."""
        queryset = ParkPhoto.objects.select_related(
            "park", "park__operator", "uploaded_by"
        )

        # If park_pk is provided in URL kwargs, filter by park
        park_pk = self.kwargs.get("park_pk")
        if park_pk:
            queryset = queryset.filter(park_id=park_pk)

        return queryset.order_by("-created_at")

    def get_serializer_class(self):  # type: ignore[override]
        """Return appropriate serializer based on action."""
        if self.action == "list":
            return ParkPhotoListOutputSerializer
        elif self.action == "create":
            return ParkPhotoCreateInputSerializer
        elif self.action in ["update", "partial_update"]:
            return ParkPhotoUpdateInputSerializer
        else:
            return ParkPhotoOutputSerializer

    def perform_create(self, serializer):
        """Create a new park photo using ParkMediaService."""
        park_id = self.kwargs.get("park_pk")
        if not park_id:
            raise ValidationError("Park ID is required")

        try:
            Park.objects.get(pk=park_id)
        except Park.DoesNotExist:
            raise ValidationError("Park not found")

        try:
            # Use the service to create the photo with proper business logic
            service = cast(Any, ParkMediaService())
            photo = service.create_photo(
                park_id=park_id,
                uploaded_by=self.request.user,
                **serializer.validated_data,
            )

            # Set the instance for the serializer response
            serializer.instance = photo

        except Exception as e:
            logger.error(f"Error creating park photo: {e}")
            raise ValidationError(f"Failed to create photo: {str(e)}")

    def perform_update(self, serializer):
        """Update park photo with permission checking."""
        instance = self.get_object()

        # Check permissions - allow owner or staff
        if not (
            self.request.user == instance.uploaded_by
            or cast(Any, self.request.user).is_staff
        ):
            raise PermissionDenied(
                "Only the photo owner or an administrator can edit this photo."
            )

        # Handle primary photo logic using service
        if serializer.validated_data.get("is_primary", False):
            try:
                ParkMediaService().set_primary_photo(
                    park_id=instance.park_id, photo_id=instance.id
                )
                # Remove is_primary from validated_data since service handles it
                if "is_primary" in serializer.validated_data:
                    del serializer.validated_data["is_primary"]
            except Exception as e:
                logger.error(f"Error setting primary photo: {e}")
                raise ValidationError(f"Failed to set primary photo: {str(e)}")

        # Persist the remaining validated fields (DRF's default perform_update behavior).
        serializer.save()

    def perform_destroy(self, instance):
        """Delete park photo with permission checking."""
        # Check permissions - allow owner or staff
        if not (
            self.request.user == instance.uploaded_by
            or cast(Any, self.request.user).is_staff
        ):
            raise PermissionDenied(
                "Only the photo owner or an administrator can delete this photo."
            )

        try:
            # Delete from Cloudflare first if image exists
            if instance.image:
                try:
                    from django_cloudflareimages_toolkit.services import CloudflareImagesService

                    service = CloudflareImagesService()
                    service.delete_image(instance.image)
                    logger.info(
                        f"Successfully deleted park photo from Cloudflare: {instance.image.cloudflare_id}"
                    )
                except Exception as e:
                    logger.error(
                        f"Failed to delete park photo from Cloudflare: {str(e)}"
                    )
                    # Continue with database deletion even if Cloudflare deletion fails

            ParkMediaService().delete_photo(
                instance.id, deleted_by=cast(UserModel, self.request.user)
            )
        except Exception as e:
            logger.error(f"Error deleting park photo: {e}")
            raise ValidationError(f"Failed to delete photo: {str(e)}")

    @extend_schema(
        summary="Set photo as primary",
        description="Set this photo as the primary photo for the park",
        responses={
            200: OpenApiTypes.OBJECT,
            400: OpenApiTypes.OBJECT,
            403: OpenApiTypes.OBJECT,
            404: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    )
    @action(detail=True, methods=["post"])
    def set_primary(self, request, **kwargs):
        """Set this photo as the primary photo for the park."""
        photo = self.get_object()

        # Check permissions - allow owner or staff
        if not (request.user == photo.uploaded_by or cast(Any, request.user).is_staff):
            raise PermissionDenied(
                "Only the photo owner or an administrator can modify this photo."
            )

        try:
            ParkMediaService().set_primary_photo(
                park_id=photo.park_id, photo_id=photo.id
            )

            # Refresh the photo instance
            photo.refresh_from_db()
            serializer = self.get_serializer(photo)

            return Response(
                {
                    "message": "Photo set as primary successfully",
                    "photo": serializer.data,
                },
                status=status.HTTP_200_OK,
            )

        except Exception as e:
            logger.error(f"Error setting primary photo: {e}")
            return Response(
                {"error": f"Failed to set primary photo: {str(e)}"},
                status=status.HTTP_400_BAD_REQUEST,
            )
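
    # Illustrative request/response for this action (the URL prefix is an assumption
    # and depends on how the viewset is routed; see the routing sketch above):
    #
    #   POST /api/v1/parks/{park_pk}/photos/{id}/set_primary/
    #   -> 200 {"message": "Photo set as primary successfully", "photo": {...}}
    #   -> 400 {"error": "Failed to set primary photo: ..."} on service errors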

    @extend_schema(
        summary="Bulk approve/reject photos",
        description="Bulk approve or reject multiple park photos (admin only)",
        request=ParkPhotoApprovalInputSerializer,
        responses={
            200: OpenApiTypes.OBJECT,
            400: OpenApiTypes.OBJECT,
            403: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    )
    @action(detail=False, methods=["post"], permission_classes=[IsAuthenticated])
    def bulk_approve(self, request, **kwargs):
        """Bulk approve or reject multiple photos (admin only)."""
        if not cast(Any, request.user).is_staff:
            raise PermissionDenied("Only administrators can approve photos.")

        serializer = ParkPhotoApprovalInputSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        validated_data = cast(dict, getattr(serializer, "validated_data", {}))
        photo_ids = validated_data.get("photo_ids")
        approve = validated_data.get("approve")
        park_id = self.kwargs.get("park_pk")

        if photo_ids is None or approve is None:
            return Response(
                {"error": "Missing required fields: photo_ids and/or approve."},
                status=status.HTTP_400_BAD_REQUEST,
            )

        try:
            # Filter photos to only those belonging to this park (if park_pk provided)
            photos_queryset = ParkPhoto.objects.filter(id__in=photo_ids)
            if park_id:
                photos_queryset = photos_queryset.filter(park_id=park_id)

            updated_count = photos_queryset.update(is_approved=approve)

            return Response(
                {
                    "message": f"Successfully {'approved' if approve else 'rejected'} {updated_count} photos",
                    "updated_count": updated_count,
                },
                status=status.HTTP_200_OK,
            )

        except Exception as e:
            logger.error(f"Error in bulk photo approval: {e}")
            return Response(
                {"error": f"Failed to update photos: {str(e)}"},
                status=status.HTTP_400_BAD_REQUEST,
            )
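
    # Illustrative payload for bulk approval, using the fields this action reads from
    # the input serializer (exact field definitions live in serializers.py):
    #
    #   POST .../photos/bulk_approve/
    #   {"photo_ids": [12, 15, 18], "approve": true}
    #   -> 200 {"message": "Successfully approved 3 photos", "updated_count": 3}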

    @extend_schema(
        summary="Get park photo statistics",
        description="Get photo statistics for the park",
        responses={
            200: ParkPhotoStatsOutputSerializer,
            404: OpenApiTypes.OBJECT,
            500: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    )
    @action(detail=False, methods=["get"])
    def stats(self, request, **kwargs):
        """Get photo statistics for the park."""
        park_pk = self.kwargs.get("park_pk")
        park = None
        if park_pk:
            try:
                park = Park.objects.get(pk=park_pk)
            except Park.DoesNotExist:
                return Response(
                    {"error": "Park not found."},
                    status=status.HTTP_404_NOT_FOUND,
                )

        try:
            if park is not None:
                stats = ParkMediaService().get_photo_stats(park=park)
            else:
                stats = ParkMediaService().get_photo_stats(park=cast(Park, None))
            serializer = ParkPhotoStatsOutputSerializer(stats)

            return Response(serializer.data, status=status.HTTP_200_OK)

        except Exception as e:
            logger.error(f"Error getting park photo stats: {e}")
            return Response(
                {"error": f"Failed to get photo statistics: {str(e)}"},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )

    # Legacy compatibility action using the legacy set_primary logic
    @extend_schema(
        summary="Set photo as primary (legacy)",
        description="Legacy set primary action for backwards compatibility",
        responses={
            200: OpenApiTypes.OBJECT,
            400: OpenApiTypes.OBJECT,
            403: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    )
    @action(detail=True, methods=["post"])
    def set_primary_legacy(self, request, id=None):
        """Legacy set primary action for backwards compatibility."""
        photo = self.get_object()
        if not (
            request.user == photo.uploaded_by
            or request.user.has_perm("parks.change_parkphoto")
        ):
            return Response(
                {"error": "You do not have permission to edit photos for this park."},
                status=status.HTTP_403_FORBIDDEN,
            )
        try:
            ParkMediaService().set_primary_photo(
                park_id=photo.park_id, photo_id=photo.id
            )
            return Response({"message": "Photo set as primary successfully."})
        except Exception as e:
            logger.error(f"Error in set_primary_photo: {str(e)}", exc_info=True)
            return Response({"error": str(e)}, status=status.HTTP_400_BAD_REQUEST)

    @extend_schema(
        summary="Save Cloudflare image as park photo",
        description="Save a Cloudflare image as a park photo after direct upload to Cloudflare",
        request=OpenApiTypes.OBJECT,
        responses={
            201: ParkPhotoOutputSerializer,
            400: OpenApiTypes.OBJECT,
            401: OpenApiTypes.OBJECT,
            404: OpenApiTypes.OBJECT,
        },
        tags=["Park Media"],
    )
    @action(detail=False, methods=["post"])
    def save_image(self, request, **kwargs):
        """Save a Cloudflare image as a park photo after direct upload to Cloudflare."""
        park_pk = self.kwargs.get("park_pk")
        if not park_pk:
            return Response(
                {"error": "Park ID is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        try:
            park = Park.objects.get(pk=park_pk)
        except Park.DoesNotExist:
            return Response(
                {"error": "Park not found"},
                status=status.HTTP_404_NOT_FOUND,
            )

        cloudflare_image_id = request.data.get("cloudflare_image_id")
        if not cloudflare_image_id:
            return Response(
                {"error": "cloudflare_image_id is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        try:
            # Import CloudflareImage model and service
            from django_cloudflareimages_toolkit.models import CloudflareImage
            from django_cloudflareimages_toolkit.services import CloudflareImagesService
            from django.utils import timezone

            # Always fetch the latest image data from Cloudflare API
            try:
                # Get image details from Cloudflare API
                service = CloudflareImagesService()
                image_data = service.get_image(cloudflare_image_id)

                if not image_data:
                    return Response(
                        {"error": "Image not found in Cloudflare"},
                        status=status.HTTP_400_BAD_REQUEST,
                    )

                # Try to find existing CloudflareImage record by cloudflare_id
                cloudflare_image = None
                try:
                    cloudflare_image = CloudflareImage.objects.get(
                        cloudflare_id=cloudflare_image_id
                    )

                    # Update existing record with latest data from Cloudflare
                    cloudflare_image.status = "uploaded"
                    cloudflare_image.uploaded_at = timezone.now()
                    cloudflare_image.metadata = image_data.get("meta", {})
                    # Extract variants from nested result structure
                    cloudflare_image.variants = image_data.get("result", {}).get("variants", [])
                    cloudflare_image.cloudflare_metadata = image_data
                    cloudflare_image.width = image_data.get("width")
                    cloudflare_image.height = image_data.get("height")
                    cloudflare_image.format = image_data.get("format", "")
                    cloudflare_image.save()

                except CloudflareImage.DoesNotExist:
                    # Create new CloudflareImage record from API response
                    cloudflare_image = CloudflareImage.objects.create(
                        cloudflare_id=cloudflare_image_id,
                        user=request.user,
                        status="uploaded",
                        upload_url="",  # Not needed for uploaded images
                        expires_at=timezone.now() + timezone.timedelta(days=365),  # Set far-future expiry
                        uploaded_at=timezone.now(),
                        metadata=image_data.get("meta", {}),
                        # Extract variants from nested result structure
                        variants=image_data.get("result", {}).get("variants", []),
                        cloudflare_metadata=image_data,
                        width=image_data.get("width"),
                        height=image_data.get("height"),
                        format=image_data.get("format", ""),
                    )

            except Exception as api_error:
                logger.error(
                    f"Error fetching image from Cloudflare API: {str(api_error)}", exc_info=True
                )
                return Response(
                    {"error": f"Failed to fetch image from Cloudflare: {str(api_error)}"},
                    status=status.HTTP_400_BAD_REQUEST,
                )

            # Create the park photo with the CloudflareImage reference
            photo = ParkPhoto.objects.create(
                park=park,
                image=cloudflare_image,
                uploaded_by=request.user,
                caption=request.data.get("caption", ""),
                alt_text=request.data.get("alt_text", ""),
                photo_type=request.data.get("photo_type", "exterior"),
                is_primary=request.data.get("is_primary", False),
                is_approved=False,  # Default to requiring approval
            )

            # Handle primary photo logic if requested
            if request.data.get("is_primary", False):
                try:
                    ParkMediaService().set_primary_photo(
                        park_id=park.id, photo_id=photo.id
                    )
                except Exception as e:
                    logger.error(f"Error setting primary photo: {e}")
                    # Don't fail the entire operation, just log the error

            serializer = ParkPhotoOutputSerializer(photo, context={"request": request})
            return Response(serializer.data, status=status.HTTP_201_CREATED)

        except Exception as e:
            logger.error(f"Error saving park photo: {e}")
            return Response(
                {"error": f"Failed to save photo: {str(e)}"},
                status=status.HTTP_400_BAD_REQUEST,
            )
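
    # Illustrative request body for save_image, using exactly the keys this action
    # reads from request.data (the URL prefix is an assumption based on the routing
    # sketch above; field values are made up):
    #
    #   POST /api/v1/parks/{park_pk}/photos/save_image/
    #   {
    #       "cloudflare_image_id": "<id returned by the Cloudflare direct upload>",
    #       "caption": "Main entrance at dusk",
    #       "alt_text": "Park entrance gate",
    #       "photo_type": "exterior",
    #       "is_primary": false
    #   }
    #   -> 201 with the serialized photo; the photo starts unapproved (is_approved=False).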


from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from .serializers import HybridParkSerializer
from apps.parks.services.hybrid_loader import smart_park_loader


@extend_schema_view(
    get=extend_schema(
        summary="Get parks with hybrid filtering",
        description="Retrieve parks with intelligent hybrid filtering strategy. Automatically chooses between client-side and server-side filtering based on data size.",
        parameters=[
            OpenApiParameter("status", OpenApiTypes.STR, description="Filter by park status (comma-separated for multiple)"),
            OpenApiParameter("park_type", OpenApiTypes.STR, description="Filter by park type (comma-separated for multiple)"),
            OpenApiParameter("country", OpenApiTypes.STR, description="Filter by country (comma-separated for multiple)"),
            OpenApiParameter("state", OpenApiTypes.STR, description="Filter by state (comma-separated for multiple)"),
            OpenApiParameter("opening_year_min", OpenApiTypes.INT, description="Minimum opening year"),
            OpenApiParameter("opening_year_max", OpenApiTypes.INT, description="Maximum opening year"),
            OpenApiParameter("size_min", OpenApiTypes.NUMBER, description="Minimum park size in acres"),
            OpenApiParameter("size_max", OpenApiTypes.NUMBER, description="Maximum park size in acres"),
            OpenApiParameter("rating_min", OpenApiTypes.NUMBER, description="Minimum average rating"),
            OpenApiParameter("rating_max", OpenApiTypes.NUMBER, description="Maximum average rating"),
            OpenApiParameter("ride_count_min", OpenApiTypes.INT, description="Minimum ride count"),
            OpenApiParameter("ride_count_max", OpenApiTypes.INT, description="Maximum ride count"),
            OpenApiParameter("coaster_count_min", OpenApiTypes.INT, description="Minimum coaster count"),
            OpenApiParameter("coaster_count_max", OpenApiTypes.INT, description="Maximum coaster count"),
            OpenApiParameter("operator", OpenApiTypes.STR, description="Filter by operator slug (comma-separated for multiple)"),
            OpenApiParameter("search", OpenApiTypes.STR, description="Search query for park names, descriptions, locations, and operators"),
            OpenApiParameter("offset", OpenApiTypes.INT, description="Offset for progressive loading (server-side pagination)"),
        ],
        responses={
            200: {
                "description": "Parks data with hybrid filtering metadata",
                "content": {
                    "application/json": {
                        "schema": {
                            "type": "object",
                            "properties": {
                                "parks": {
                                    "type": "array",
                                    "items": {"$ref": "#/components/schemas/HybridParkSerializer"},
                                },
                                "total_count": {"type": "integer"},
                                "strategy": {
                                    "type": "string",
                                    "enum": ["client_side", "server_side"],
                                    "description": "Filtering strategy used",
                                },
                                "has_more": {
                                    "type": "boolean",
                                    "description": "Whether more data is available for progressive loading",
                                },
                                "next_offset": {
                                    "type": "integer",
                                    "nullable": True,
                                    "description": "Next offset for progressive loading",
                                },
                                "filter_metadata": {
                                    "type": "object",
                                    "description": "Available filter options and ranges",
                                },
                            },
                        }
                    }
                },
            }
        },
        tags=["Parks"],
    )
)
class HybridParkAPIView(APIView):
    """
    Hybrid Park API View with intelligent filtering strategy.

    Automatically chooses between client-side and server-side filtering
    based on data size and complexity. Provides progressive loading
    for large datasets and complete data for smaller sets.
    """

    permission_classes = [AllowAny]

    def get(self, request):
        """Get parks with hybrid filtering strategy."""
        try:
            # Extract filters from query parameters
            filters = self._extract_filters(request.query_params)

            # Check if this is a progressive load request
            offset = request.query_params.get("offset")
            if offset is not None:
                try:
                    offset = int(offset)
                    # Get progressive load data
                    data = smart_park_loader.get_progressive_load(offset, filters)
                except ValueError:
                    return Response(
                        {"error": "Invalid offset parameter"},
                        status=status.HTTP_400_BAD_REQUEST,
                    )
            else:
                # Get initial load data
                data = smart_park_loader.get_initial_load(filters)

            # Serialize the parks data
            serializer = HybridParkSerializer(data["parks"], many=True)

            # Prepare response
            response_data = {
                "parks": serializer.data,
                "total_count": data["total_count"],
                "strategy": data.get("strategy", "server_side"),
                "has_more": data.get("has_more", False),
                "next_offset": data.get("next_offset"),
            }

            # Include filter metadata for initial loads
            if "filter_metadata" in data:
                response_data["filter_metadata"] = data["filter_metadata"]

            return Response(response_data, status=status.HTTP_200_OK)

        except Exception as e:
            logger.error(f"Error in HybridParkAPIView: {e}")
            return Response(
                {"error": "Internal server error"},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )
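
    # Illustrative progressive-loading flow (the endpoint path is an assumption; the
    # response keys come straight from response_data above, the values are made up):
    #
    #   GET /api/v1/parks/hybrid/?search=coaster            -> initial load
    #       {"parks": [...], "total_count": 420, "strategy": "server_side",
    #        "has_more": true, "next_offset": 100, "filter_metadata": {...}}
    #
    #   GET /api/v1/parks/hybrid/?search=coaster&offset=100 -> next page
    #       {"parks": [...], "has_more": false, "next_offset": null, ...}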

    def _extract_filters(self, query_params):
        """Extract and parse filters from query parameters."""
        filters = {}

        # Handle comma-separated list parameters
        list_params = ["status", "park_type", "country", "state", "operator"]
        for param in list_params:
            value = query_params.get(param)
            if value:
                filters[param] = [v.strip() for v in value.split(",") if v.strip()]

        # Handle integer parameters
        int_params = [
            "opening_year_min", "opening_year_max",
            "ride_count_min", "ride_count_max",
            "coaster_count_min", "coaster_count_max",
        ]
        for param in int_params:
            value = query_params.get(param)
            if value:
                try:
                    filters[param] = int(value)
                except ValueError:
                    pass  # Skip invalid integer values

        # Handle float parameters
        float_params = ["size_min", "size_max", "rating_min", "rating_max"]
        for param in float_params:
            value = query_params.get(param)
            if value:
                try:
                    filters[param] = float(value)
                except ValueError:
                    pass  # Skip invalid float values

        # Handle search parameter
        search = query_params.get("search")
        if search:
            filters["search"] = search.strip()

        return filters
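
    # Worked example of the parsing above (the filter values are illustrative):
    #
    #   ?status=OPERATING,CLOSED&opening_year_min=1990&rating_min=4.0&search=cedar
    #
    # becomes
    #
    #   {
    #       "status": ["OPERATING", "CLOSED"],
    #       "opening_year_min": 1990,
    #       "rating_min": 4.0,
    #       "search": "cedar",
    #   }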


@extend_schema_view(
    get=extend_schema(
        summary="Get park filter metadata",
        description="Get available filter options and ranges for parks filtering.",
        parameters=[
            OpenApiParameter("scoped", OpenApiTypes.BOOL, description="Whether to scope metadata to current filters"),
        ],
        responses={
            200: {
                "description": "Filter metadata",
                "content": {
                    "application/json": {
                        "schema": {
                            "type": "object",
                            "properties": {
                                "categorical": {
                                    "type": "object",
                                    "properties": {
                                        "countries": {"type": "array", "items": {"type": "string"}},
                                        "states": {"type": "array", "items": {"type": "string"}},
                                        "park_types": {"type": "array", "items": {"type": "string"}},
                                        "statuses": {"type": "array", "items": {"type": "string"}},
                                        "operators": {
                                            "type": "array",
                                            "items": {
                                                "type": "object",
                                                "properties": {
                                                    "name": {"type": "string"},
                                                    "slug": {"type": "string"},
                                                },
                                            },
                                        },
                                    },
                                },
                                "ranges": {
                                    "type": "object",
                                    "properties": {
                                        "opening_year": {
                                            "type": "object",
                                            "properties": {
                                                "min": {"type": "integer", "nullable": True},
                                                "max": {"type": "integer", "nullable": True},
                                            },
                                        },
                                        "size_acres": {
                                            "type": "object",
                                            "properties": {
                                                "min": {"type": "number", "nullable": True},
                                                "max": {"type": "number", "nullable": True},
                                            },
                                        },
                                        "average_rating": {
                                            "type": "object",
                                            "properties": {
                                                "min": {"type": "number", "nullable": True},
                                                "max": {"type": "number", "nullable": True},
                                            },
                                        },
                                        "ride_count": {
                                            "type": "object",
                                            "properties": {
                                                "min": {"type": "integer", "nullable": True},
                                                "max": {"type": "integer", "nullable": True},
                                            },
                                        },
                                        "coaster_count": {
                                            "type": "object",
                                            "properties": {
                                                "min": {"type": "integer", "nullable": True},
                                                "max": {"type": "integer", "nullable": True},
                                            },
                                        },
                                    },
                                },
                                "total_count": {"type": "integer"},
                            },
                        }
                    }
                },
            }
        },
        tags=["Parks"],
    )
)
class ParkFilterMetadataAPIView(APIView):
    """
    API view for getting park filter metadata.

    Provides information about available filter options and ranges
    to help build dynamic filter interfaces.
    """

    permission_classes = [AllowAny]

    def get(self, request):
        """Get park filter metadata."""
        try:
            # Check if metadata should be scoped to current filters
            scoped = request.query_params.get("scoped", "").lower() == "true"
            filters = None

            if scoped:
                filters = self._extract_filters(request.query_params)

            # Get filter metadata
            metadata = smart_park_loader.get_filter_metadata(filters)

            return Response(metadata, status=status.HTTP_200_OK)

        except Exception as e:
            logger.error(f"Error in ParkFilterMetadataAPIView: {e}")
            return Response(
                {"error": "Internal server error"},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )

    def _extract_filters(self, query_params):
        """Extract and parse filters from query parameters."""
        # Reuse the same filter extraction logic
        view = HybridParkAPIView()
        return view._extract_filters(query_params)
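
# Illustrative requests for the filter metadata endpoint (paths and filter values are
# assumptions; they depend on how these views are wired into the project's URLconf):
#
#   GET /api/v1/parks/filter-metadata/
#       -> {"categorical": {...}, "ranges": {...}, "total_count": 123}
#   GET /api/v1/parks/filter-metadata/?scoped=true&country=USA
#       -> same shape, but option lists and ranges restricted by the current filters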