Improve park listing performance with optimized queries and caching

Implement performance enhancements for park listing by optimizing database queries, introducing caching for expensive counts, and refining pagination for a faster, smoother user experience.

Replit-Commit-Author: Agent
Replit-Commit-Session-Id: c446bc9e-66df-438c-a86c-f53e6da13649
Replit-Commit-Checkpoint-Type: intermediate_checkpoint
This commit is contained in:
pac7
2025-09-23 22:50:09 +00:00
parent 6391b3d81c
commit fff180c476
11 changed files with 2864 additions and 70 deletions

View File

@@ -0,0 +1,198 @@
"""
Django management command to run performance benchmarks.
"""
from django.core.management.base import BaseCommand
from django.utils import timezone
import json
import time
class Command(BaseCommand):
help = 'Run comprehensive performance benchmarks for park listing features'
def add_arguments(self, parser):
parser.add_argument(
'--save',
action='store_true',
help='Save detailed benchmark results to file',
)
parser.add_argument(
'--autocomplete-only',
action='store_true',
help='Run only autocomplete benchmarks',
)
parser.add_argument(
'--listing-only',
action='store_true',
help='Run only listing benchmarks',
)
parser.add_argument(
'--pagination-only',
action='store_true',
help='Run only pagination benchmarks',
)
parser.add_argument(
'--iterations',
type=int,
default=1,
help='Number of iterations to run (default: 1)',
)
def handle(self, *args, **options):
from apps.parks.services.performance_monitoring import BenchmarkSuite
self.stdout.write(
self.style.SUCCESS('Starting Park Listing Performance Benchmarks')
)
suite = BenchmarkSuite()
iterations = options['iterations']
all_results = []
for i in range(iterations):
if iterations > 1:
self.stdout.write(f'\nIteration {i + 1}/{iterations}')
start_time = time.perf_counter()
# Run specific benchmarks or full suite
if options['autocomplete_only']:
result = suite.run_autocomplete_benchmark()
elif options['listing_only']:
result = suite.run_listing_benchmark()
elif options['pagination_only']:
result = suite.run_pagination_benchmark()
else:
result = suite.run_full_benchmark_suite()
duration = time.perf_counter() - start_time
result['iteration'] = i + 1
result['benchmark_duration'] = duration
all_results.append(result)
# Display summary for this iteration
self._display_iteration_summary(result, duration)
# Display overall summary if multiple iterations
if iterations > 1:
self._display_overall_summary(all_results)
# Save results if requested
if options['save']:
self._save_results(all_results)
self.stdout.write(
self.style.SUCCESS('\nBenchmark completed successfully!')
)
def _display_iteration_summary(self, result, duration):
"""Display summary for a single iteration."""
if 'overall_summary' in result:
summary = result['overall_summary']
self.stdout.write(f'\nBenchmark Duration: {duration:.3f}s')
self.stdout.write(f'Total Operations: {summary["total_operations"]}')
self.stdout.write(f'Average Response Time: {summary["duration_stats"]["mean"]:.3f}s')
self.stdout.write(f'Average Query Count: {summary["query_stats"]["mean"]:.1f}')
self.stdout.write(f'Cache Hit Rate: {summary["cache_stats"]["hit_rate"]:.1f}%')
# Display slowest operations
if summary.get('slowest_operations'):
self.stdout.write('\nSlowest Operations:')
for op in summary['slowest_operations'][:3]:
self.stdout.write(f' {op["operation"]}: {op["duration"]:.3f}s ({op["query_count"]} queries)')
# Display recommendations
if result.get('recommendations'):
self.stdout.write('\nRecommendations:')
for rec in result['recommendations']:
self.stdout.write(rec)
# Display specific benchmark results
for benchmark_type in ['autocomplete', 'listing', 'pagination']:
if benchmark_type in result:
self._display_benchmark_results(benchmark_type, result[benchmark_type])
def _display_benchmark_results(self, benchmark_type, results):
"""Display results for a specific benchmark type."""
self.stdout.write(f'\n{benchmark_type.title()} Benchmark Results:')
if benchmark_type == 'autocomplete':
for query_result in results.get('results', []):
self.stdout.write(
f' Query "{query_result["query"]}": {query_result["response_time"]:.3f}s '
f'({query_result["query_count"]} queries)'
)
elif benchmark_type == 'listing':
for scenario in results.get('results', []):
self.stdout.write(
f' {scenario["scenario"]}: {scenario["response_time"]:.3f}s '
f'({scenario["query_count"]} queries, {scenario["result_count"]} results)'
)
elif benchmark_type == 'pagination':
# Group by page size for cleaner display
by_page_size = {}
for result in results.get('results', []):
size = result['page_size']
if size not in by_page_size:
by_page_size[size] = []
by_page_size[size].append(result)
for page_size, page_results in by_page_size.items():
avg_time = sum(r['response_time'] for r in page_results) / len(page_results)
avg_queries = sum(r['query_count'] for r in page_results) / len(page_results)
self.stdout.write(
f' Page size {page_size}: avg {avg_time:.3f}s ({avg_queries:.1f} queries)'
)
def _display_overall_summary(self, all_results):
"""Display summary across all iterations."""
self.stdout.write('\n' + '=' * 50)
self.stdout.write('OVERALL SUMMARY ACROSS ALL ITERATIONS')
self.stdout.write('='*50)
# Calculate averages across iterations
total_duration = sum(r['benchmark_duration'] for r in all_results)
# Extract performance metrics from iterations with overall_summary
overall_summaries = [r['overall_summary'] for r in all_results if 'overall_summary' in r]
if overall_summaries:
avg_response_time = sum(s['duration_stats']['mean'] for s in overall_summaries) / len(overall_summaries)
avg_query_count = sum(s['query_stats']['mean'] for s in overall_summaries) / len(overall_summaries)
avg_cache_hit_rate = sum(s['cache_stats']['hit_rate'] for s in overall_summaries) / len(overall_summaries)
self.stdout.write(f'Total Benchmark Time: {total_duration:.3f}s')
self.stdout.write(f'Average Response Time: {avg_response_time:.3f}s')
self.stdout.write(f'Average Query Count: {avg_query_count:.1f}')
self.stdout.write(f'Average Cache Hit Rate: {avg_cache_hit_rate:.1f}%')
def _save_results(self, results):
"""Save benchmark results to file."""
timestamp = timezone.now().strftime('%Y%m%d_%H%M%S')
filename = f'benchmark_results_{timestamp}.json'
try:
import os
# Ensure logs directory exists
logs_dir = 'logs'
os.makedirs(logs_dir, exist_ok=True)
filepath = os.path.join(logs_dir, filename)
with open(filepath, 'w') as f:
json.dump(results, f, indent=2, default=str)
self.stdout.write(
self.style.SUCCESS(f'Results saved to {filepath}')
)
except Exception as e:
self.stdout.write(
self.style.ERROR(f'Error saving results: {e}')
)
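A usage sketch for the command (the command file's name is not shown in this diff, so "run_benchmarks" below is an assumed invocation name):
# Hypothetical invocation; "run_benchmarks" is an assumed command name,
# since the management command's filename does not appear in this diff.
from django.core.management import call_command
# Run three iterations of the full suite and save results under logs/
call_command('run_benchmarks', iterations=3, save=True)
# Run only the pagination benchmarks
call_command('run_benchmarks', pagination_only=True)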

View File

@@ -0,0 +1,54 @@
# Generated by Django 5.2.6 on 2025-09-23 22:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parks', '0001_initial'),
]
operations = [
# Performance indexes for frequently filtered fields
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_parks_status_operator ON parks_park(status, operator_id);",
reverse_sql="DROP INDEX IF EXISTS idx_parks_status_operator;"
),
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_parks_park_type_status ON parks_park(park_type, status);",
reverse_sql="DROP INDEX IF EXISTS idx_parks_park_type_status;"
),
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_parks_opening_year_status ON parks_park(opening_year, status) WHERE opening_year IS NOT NULL;",
reverse_sql="DROP INDEX IF EXISTS idx_parks_opening_year_status;"
),
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_parks_ride_count_coaster_count ON parks_park(ride_count, coaster_count) WHERE ride_count IS NOT NULL;",
reverse_sql="DROP INDEX IF EXISTS idx_parks_ride_count_coaster_count;"
),
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_parks_average_rating_status ON parks_park(average_rating, status) WHERE average_rating IS NOT NULL;",
reverse_sql="DROP INDEX IF EXISTS idx_parks_average_rating_status;"
),
# Search optimization index
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_parks_search_text_gin ON parks_park USING gin(search_text gin_trgm_ops);",
reverse_sql="DROP INDEX IF EXISTS idx_parks_search_text_gin;"
),
# Location-based indexes for ParkLocation
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_parklocation_country_city ON parks_parklocation(country, city);",
reverse_sql="DROP INDEX IF EXISTS idx_parklocation_country_city;"
),
# Company name index for operator filtering
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_company_name_roles ON parks_company USING gin(name gin_trgm_ops, roles);",
reverse_sql="DROP INDEX IF EXISTS idx_company_name_roles;"
),
# Timestamps for ordering and filtering
migrations.RunSQL(
"CREATE INDEX IF NOT EXISTS idx_parks_created_at_status ON parks_park(created_at, status);",
reverse_sql="DROP INDEX IF EXISTS idx_parks_created_at_status;"
),
]
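Note: the gin_trgm_ops indexes above require PostgreSQL's pg_trgm extension to be installed before this migration runs. A minimal sketch of a preceding migration that enables it via Django's built-in TrigramExtension operation (the dependency ordering below is illustrative, not taken from this commit):
# Sketch: enable pg_trgm so the gin_trgm_ops indexes above can be created.
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations
class Migration(migrations.Migration):
    # Ordering is illustrative; in practice this must run before the index migration.
    dependencies = [('parks', '0001_initial')]
    operations = [TrigramExtension()]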

File diff suppressed because one or more lines are too long

View File

@@ -28,7 +28,8 @@ class ParkFilterService:
self, base_queryset: Optional[QuerySet] = None
) -> Dict[str, Any]:
"""
Get counts for various filter options to show users what's available.
Get counts for various filter options with optimized single-query aggregations.
This eliminates multiple expensive COUNT queries.
Args:
base_queryset: Optional base queryset to use for calculations
@@ -42,24 +43,49 @@ class ParkFilterService:
if cached_result is not None:
return cached_result
if base_queryset is None:
base_queryset = get_base_park_queryset()
from apps.core.utils.query_optimization import track_queries
with track_queries("optimized_filter_counts"):
if base_queryset is None:
base_queryset = get_base_park_queryset()
# Calculate filter counts
filter_counts = {
"total_parks": base_queryset.count(),
"operating_parks": base_queryset.filter(status="OPERATING").count(),
"parks_with_coasters": base_queryset.filter(coaster_count__gt=0).count(),
"big_parks": base_queryset.filter(ride_count__gte=10).count(),
"highly_rated": base_queryset.filter(average_rating__gte=4.0).count(),
"park_types": self._get_park_type_counts(base_queryset),
"top_operators": self._get_top_operators(base_queryset),
"countries": self._get_country_counts(base_queryset),
}
# Use optimized single-query aggregation instead of multiple COUNT queries
aggregates = base_queryset.aggregate(
total_parks=Count('id'),
operating_parks=Count('id', filter=Q(status='OPERATING')),
parks_with_coasters=Count('id', filter=Q(coaster_count__gt=0)),
big_parks=Count('id', filter=Q(ride_count__gte=10)),
highly_rated=Count('id', filter=Q(average_rating__gte=4.0)),
disney_parks=Count('id', filter=Q(operator__name__icontains='Disney')),
universal_parks=Count('id', filter=Q(operator__name__icontains='Universal')),
six_flags_parks=Count('id', filter=Q(operator__name__icontains='Six Flags')),
cedar_fair_parks=Count('id', filter=Q(
Q(operator__name__icontains='Cedar Fair') |
Q(operator__name__icontains='Cedar Point') |
Q(operator__name__icontains='Kings Island')
))
)
# Cache the result
cache.set(cache_key, filter_counts, self.CACHE_TIMEOUT)
return filter_counts
# Calculate filter counts efficiently
filter_counts = {
"total_parks": aggregates['total_parks'],
"operating_parks": aggregates['operating_parks'],
"parks_with_coasters": aggregates['parks_with_coasters'],
"big_parks": aggregates['big_parks'],
"highly_rated": aggregates['highly_rated'],
"park_types": {
"disney": aggregates['disney_parks'],
"universal": aggregates['universal_parks'],
"six_flags": aggregates['six_flags_parks'],
"cedar_fair": aggregates['cedar_fair_parks'],
},
"top_operators": self._get_top_operators_optimized(base_queryset),
"countries": self._get_country_counts_optimized(base_queryset),
}
# Cache the result for longer since this is expensive
cache.set(cache_key, filter_counts, self.CACHE_TIMEOUT * 2)
return filter_counts
def _get_park_type_counts(self, queryset: QuerySet) -> Dict[str, int]:
"""Get counts for different park types based on operator names."""
@@ -210,9 +236,11 @@ class ParkFilterService:
for key in cache_keys:
cache.delete(key)
def get_filtered_queryset(self, filters: Dict[str, Any]) -> QuerySet: # noqa: C901
def get_optimized_filtered_queryset(self, filters: Dict[str, Any]) -> QuerySet: # noqa: C901
"""
Apply filters to get a filtered queryset with optimizations.
Apply filters to get a filtered queryset with comprehensive optimizations.
This method eliminates the expensive subquery pattern and builds an optimized
queryset from the ground up.
Args:
filters: Dictionary of filter parameters
@@ -220,6 +248,94 @@ class ParkFilterService:
Returns:
Filtered and optimized QuerySet
"""
from apps.core.utils.query_optimization import track_queries
with track_queries("optimized_filtered_queryset"):
# Start with base Park queryset and apply all optimizations at once
queryset = (
Park.objects
.select_related(
"operator",
"property_owner",
"location",
"banner_image",
"card_image"
)
.prefetch_related(
"photos",
"rides__manufacturer",
"areas"
)
.annotate(
current_ride_count=Count("rides", distinct=True),
current_coaster_count=Count(
"rides", filter=Q(rides__category="RC"), distinct=True
),
)
)
# Build optimized filter conditions
filter_conditions = Q()
# Apply status filter
if filters.get("status"):
filter_conditions &= Q(status=filters["status"])
# Apply park type filter
if filters.get("park_type"):
filter_conditions &= self._get_park_type_filter(filters["park_type"])
# Apply coaster filter
if filters.get("has_coasters"):
filter_conditions &= Q(coaster_count__gt=0)
# Apply rating filter
if filters.get("min_rating"):
try:
min_rating = float(filters["min_rating"])
filter_conditions &= Q(average_rating__gte=min_rating)
except (ValueError, TypeError):
pass
# Apply big parks filter
if filters.get("big_parks_only"):
filter_conditions &= Q(ride_count__gte=10)
# Apply optimized search using search_text field
if filters.get("search"):
search_query = filters["search"].strip()
if search_query:
# Use the computed search_text field for better performance
search_conditions = (
Q(search_text__icontains=search_query)
| Q(name__icontains=search_query)
| Q(location__city__icontains=search_query)
| Q(location__country__icontains=search_query)
)
filter_conditions &= search_conditions
# Apply location filters
if filters.get("country_filter"):
filter_conditions &= Q(
location__country__icontains=filters["country_filter"]
)
if filters.get("state_filter"):
filter_conditions &= Q(
location__state__icontains=filters["state_filter"]
)
# Apply all filters at once for better query planning
if filter_conditions:
queryset = queryset.filter(filter_conditions)
return queryset.distinct()
def get_filtered_queryset(self, filters: Dict[str, Any]) -> QuerySet: # noqa: C901
"""
Legacy method - kept for backward compatibility.
Use get_optimized_filtered_queryset for new implementations.
"""
queryset = (
get_base_park_queryset()
.select_related("operator", "property_owner", "location")
@@ -302,3 +418,50 @@ class ParkFilterService:
return queryset.filter(type_filters[park_type])
return queryset
def _get_park_type_filter(self, park_type: str) -> Q:
"""Get park type filter as Q object for optimized filtering."""
type_filters = {
"disney": Q(operator__name__icontains="Disney"),
"universal": Q(operator__name__icontains="Universal"),
"six_flags": Q(operator__name__icontains="Six Flags"),
"cedar_fair": (
Q(operator__name__icontains="Cedar Fair")
| Q(operator__name__icontains="Cedar Point")
| Q(operator__name__icontains="Kings Island")
| Q(operator__name__icontains="Canada's Wonderland")
),
"independent": ~(
Q(operator__name__icontains="Disney")
| Q(operator__name__icontains="Universal")
| Q(operator__name__icontains="Six Flags")
| Q(operator__name__icontains="Cedar Fair")
| Q(operator__name__icontains="Cedar Point")
| Q(operator__name__icontains="Kings Island")
| Q(operator__name__icontains="Canada's Wonderland")
),
}
return type_filters.get(park_type, Q())
def _get_top_operators_optimized(
self, queryset: QuerySet, limit: int = 10
) -> List[Dict[str, Any]]:
"""Get the top operators by number of parks using optimized query."""
return list(
queryset.values("operator__name", "operator__id")
.annotate(park_count=Count("id"))
.filter(park_count__gt=0)
.order_by("-park_count")[:limit]
)
def _get_country_counts_optimized(
self, queryset: QuerySet, limit: int = 10
) -> List[Dict[str, Any]]:
"""Get countries with the most parks using optimized query."""
return list(
queryset.filter(location__country__isnull=False)
.values("location__country")
.annotate(park_count=Count("id"))
.filter(park_count__gt=0)
.order_by("-park_count")[:limit]
)
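A brief usage sketch of the service (hedged: the counts method's name is truncated by the diff context above, so get_filter_counts below is an assumed name, and the ParkFilterService import path is not shown in this diff; the filter keys mirror those handled in get_optimized_filtered_queryset):
# Sketch: filtering parks with the optimized queryset builder.
service = ParkFilterService()
filters = {'status': 'OPERATING', 'has_coasters': True, 'min_rating': '4.0'}
parks = service.get_optimized_filtered_queryset(filters)
# Assumed method name for the single-aggregate counts shown above.
counts = service.get_filter_counts()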

View File

@@ -0,0 +1,311 @@
"""
Optimized pagination service for large datasets with efficient counting.
"""
from typing import Dict, Any, Optional, Tuple, Union
from django.core.paginator import Paginator, Page
from django.core.cache import cache
from django.db.models import QuerySet
import hashlib
import time
import logging
logger = logging.getLogger("pagination_service")
class OptimizedPaginator(Paginator):
"""
Custom paginator that optimizes COUNT queries and provides caching.
"""
def __init__(self, object_list, per_page, cache_timeout=300, **kwargs):
super().__init__(object_list, per_page, **kwargs)
self.cache_timeout = cache_timeout
self._cached_count = None
self._count_cache_key = None
def _get_count_cache_key(self) -> str:
"""Generate cache key for count based on queryset SQL."""
if self._count_cache_key:
return self._count_cache_key
# Create cache key from queryset SQL
if hasattr(self.object_list, 'query'):
sql_hash = hashlib.md5(
str(self.object_list.query).encode('utf-8')
).hexdigest()[:16]
self._count_cache_key = f"paginator_count:{sql_hash}"
else:
# Fallback for non-queryset object lists
self._count_cache_key = f"paginator_count:list:{len(self.object_list)}"
return self._count_cache_key
@property
def count(self):
"""
Optimized count with caching for expensive querysets.
"""
if self._cached_count is not None:
return self._cached_count
cache_key = self._get_count_cache_key()
cached_count = cache.get(cache_key)
if cached_count is not None:
logger.debug(f"Cache hit for pagination count: {cache_key}")
self._cached_count = cached_count
return cached_count
# Perform optimized count
start_time = time.time()
if isinstance(self.object_list, QuerySet):  # plain lists also have .count, but it requires an argument
# For QuerySets, try to optimize the count query
count = self._get_optimized_count()
else:
count = len(self.object_list)
execution_time = time.time() - start_time
# Cache the result
cache.set(cache_key, count, self.cache_timeout)
self._cached_count = count
if execution_time > 0.5: # Log slow count queries
logger.warning(
f"Slow pagination count query: {execution_time:.3f}s for {count} items",
extra={'cache_key': cache_key, 'execution_time': execution_time}
)
return count
def _get_optimized_count(self) -> int:
"""
Get optimized count for complex querysets.
"""
queryset = self.object_list
# For complex queries with joins, count over a pk-only subquery instead of the full select
if self._is_complex_query(queryset):
# Try to get count from a simpler subquery
try:
# Use subquery approach for complex queries
subquery = queryset.values('pk')
return subquery.count()
except Exception as e:
logger.warning(f"Optimized count failed, falling back to standard count: {e}")
return queryset.count()
else:
return queryset.count()
def _is_complex_query(self, queryset) -> bool:
"""
Determine if a queryset is complex and might benefit from optimization.
"""
if not hasattr(queryset, 'query'):
return False
sql = str(queryset.query).upper()
# Consider complex if it has multiple joins or subqueries
complexity_indicators = [
'JOIN' in sql and sql.count('JOIN') > 2,
'DISTINCT' in sql,
'GROUP BY' in sql,
'HAVING' in sql,
]
return any(complexity_indicators)
class CursorPaginator:
"""
Cursor-based pagination for very large datasets.
More efficient than offset-based pagination for large page numbers.
"""
def __init__(self, queryset: QuerySet, ordering_field: str = 'id', per_page: int = 20):
self.queryset = queryset
self.ordering_field = ordering_field
self.per_page = per_page
self.reverse = ordering_field.startswith('-')
self.field_name = ordering_field.lstrip('-')
def get_page(self, cursor: Optional[str] = None) -> Dict[str, Any]:
"""
Get a page of results using cursor-based pagination.
Args:
cursor: Base64 encoded cursor value from previous page
Returns:
Dictionary with page data and navigation cursors
"""
queryset = self.queryset.order_by(self.ordering_field)
if cursor:
# Decode cursor and filter from that point
try:
cursor_value = self._decode_cursor(cursor)
if self.reverse:
queryset = queryset.filter(**{f"{self.field_name}__lt": cursor_value})
else:
queryset = queryset.filter(**{f"{self.field_name}__gt": cursor_value})
except (ValueError, TypeError):
# Invalid cursor, start from beginning
pass
# Get one extra item to check if there's a next page
items = list(queryset[:self.per_page + 1])
has_next = len(items) > self.per_page
if has_next:
items = items[:-1] # Remove the extra item
# Generate cursors for navigation
next_cursor = None
previous_cursor = None
if items and has_next:
last_item = items[-1]
next_cursor = self._encode_cursor(getattr(last_item, self.field_name))
if items and cursor:
first_item = items[0]
previous_cursor = self._encode_cursor(getattr(first_item, self.field_name))
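# Note: this "previous" cursor re-enters the forward filter at the current
# page's first item; true backwards paging would need reversed ordering.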
return {
'items': items,
'has_next': has_next,
'has_previous': cursor is not None,
'next_cursor': next_cursor,
'previous_cursor': previous_cursor,
'count': len(items)
}
def _encode_cursor(self, value) -> str:
"""Encode cursor value to base64 string."""
import base64
return base64.b64encode(str(value).encode()).decode()
def _decode_cursor(self, cursor: str):
"""Decode cursor from base64 string."""
import base64
decoded = base64.b64decode(cursor.encode()).decode()
# Try to convert to appropriate type based on field
field = self.queryset.model._meta.get_field(self.field_name)
if hasattr(field, 'to_python'):
return field.to_python(decoded)
return decoded
class PaginationCache:
"""
Advanced caching for pagination metadata and results.
"""
CACHE_PREFIX = "pagination"
DEFAULT_TIMEOUT = 300 # 5 minutes
@classmethod
def get_page_cache_key(cls, queryset_hash: str, page_num: int) -> str:
"""Generate cache key for a specific page."""
return f"{cls.CACHE_PREFIX}:page:{queryset_hash}:{page_num}"
@classmethod
def get_metadata_cache_key(cls, queryset_hash: str) -> str:
"""Generate cache key for pagination metadata."""
return f"{cls.CACHE_PREFIX}:meta:{queryset_hash}"
@classmethod
def cache_page_results(
cls,
queryset_hash: str,
page_num: int,
page_data: Dict[str, Any],
timeout: int = DEFAULT_TIMEOUT
):
"""Cache page results."""
cache_key = cls.get_page_cache_key(queryset_hash, page_num)
cache.set(cache_key, page_data, timeout)
@classmethod
def get_cached_page(cls, queryset_hash: str, page_num: int) -> Optional[Dict[str, Any]]:
"""Get cached page results."""
cache_key = cls.get_page_cache_key(queryset_hash, page_num)
return cache.get(cache_key)
@classmethod
def cache_metadata(
cls,
queryset_hash: str,
metadata: Dict[str, Any],
timeout: int = DEFAULT_TIMEOUT
):
"""Cache pagination metadata."""
cache_key = cls.get_metadata_cache_key(queryset_hash)
cache.set(cache_key, metadata, timeout)
@classmethod
def get_cached_metadata(cls, queryset_hash: str) -> Optional[Dict[str, Any]]:
"""Get cached pagination metadata."""
cache_key = cls.get_metadata_cache_key(queryset_hash)
return cache.get(cache_key)
@classmethod
def invalidate_cache(cls, queryset_hash: str):
"""Invalidate all cache entries for a queryset."""
# This would require a cache backend that supports pattern deletion
# For now, we'll rely on TTL expiration
pass
def get_optimized_page(
queryset: QuerySet,
page_number: int,
per_page: int = 20,
use_cursor: bool = False,
cursor: Optional[str] = None,
cache_timeout: int = 300
) -> Tuple[Union[Page, Dict[str, Any]], Dict[str, Any]]:
"""
Get an optimized page with caching and performance monitoring.
Args:
queryset: The queryset to paginate
page_number: Page number to retrieve
per_page: Items per page
use_cursor: Whether to use cursor-based pagination
cursor: Cursor for cursor-based pagination
cache_timeout: Cache timeout in seconds
Returns:
Tuple of (page, metadata dict); the page is a Page for offset pagination or a result dict for cursor pagination
"""
if use_cursor:
paginator = CursorPaginator(queryset, per_page=per_page)
page_data = paginator.get_page(cursor)
return page_data, {
'pagination_type': 'cursor',
'has_next': page_data['has_next'],
'has_previous': page_data['has_previous'],
'next_cursor': page_data['next_cursor'],
'previous_cursor': page_data['previous_cursor']
}
else:
paginator = OptimizedPaginator(queryset, per_page, cache_timeout=cache_timeout)
page = paginator.get_page(page_number)
return page, {
'pagination_type': 'offset',
'total_pages': paginator.num_pages,
'total_count': paginator.count,
'has_next': page.has_next(),
'has_previous': page.has_previous(),
'current_page': page.number
}
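A short usage sketch of get_optimized_page (assuming a park queryset from get_base_park_queryset, as used elsewhere in this commit):
from apps.parks.querysets import get_base_park_queryset
from apps.parks.services.pagination_service import get_optimized_page
qs = get_base_park_queryset().order_by('id')
# Offset pagination with a cached COUNT
page, meta = get_optimized_page(qs, page_number=2, per_page=20)
# Cursor pagination for deep pages; feed back the cursor from the prior response
page_data, meta = get_optimized_page(
    qs, page_number=1, per_page=20, use_cursor=True, cursor=None
)
next_cursor = meta['next_cursor']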

View File

@@ -0,0 +1,402 @@
"""
Performance monitoring and benchmarking tools for park listing optimizations.
"""
import time
import logging
import statistics
from typing import Dict, List, Any, Optional, Callable
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import datetime
from django.db import connection
from django.core.cache import cache
from django.test import RequestFactory
import json
logger = logging.getLogger("performance_monitoring")
@dataclass
class PerformanceMetric:
"""Data class for storing performance metrics."""
operation: str
duration: float
query_count: int
cache_hits: int = 0
cache_misses: int = 0
memory_usage: Optional[float] = None
timestamp: datetime = field(default_factory=datetime.now)
metadata: Dict[str, Any] = field(default_factory=dict)
class PerformanceMonitor:
"""
Comprehensive performance monitoring for park listing operations.
"""
def __init__(self):
self.metrics: List[PerformanceMetric] = []
self.cache_stats = {'hits': 0, 'misses': 0}
@contextmanager
def measure_operation(self, operation_name: str, **metadata):
"""Context manager to measure operation performance."""
initial_queries = len(connection.queries) if hasattr(connection, 'queries') else 0
initial_cache_hits = self.cache_stats['hits']
initial_cache_misses = self.cache_stats['misses']
start_time = time.perf_counter()
start_memory = self._get_memory_usage()
try:
yield
finally:
end_time = time.perf_counter()
end_memory = self._get_memory_usage()
duration = end_time - start_time
query_count = (len(connection.queries) - initial_queries) if hasattr(connection, 'queries') else 0
cache_hits = self.cache_stats['hits'] - initial_cache_hits
cache_misses = self.cache_stats['misses'] - initial_cache_misses
memory_delta = end_memory - start_memory if start_memory and end_memory else None
metric = PerformanceMetric(
operation=operation_name,
duration=duration,
query_count=query_count,
cache_hits=cache_hits,
cache_misses=cache_misses,
memory_usage=memory_delta,
metadata=metadata
)
self.metrics.append(metric)
self._log_metric(metric)
def _get_memory_usage(self) -> Optional[float]:
"""Get current memory usage in MB."""
try:
import psutil
process = psutil.Process()
return process.memory_info().rss / 1024 / 1024 # Convert to MB
except ImportError:
return None
def _log_metric(self, metric: PerformanceMetric):
"""Log performance metric with appropriate level."""
message = (
f"{metric.operation}: {metric.duration:.3f}s, "
f"{metric.query_count} queries, "
f"{metric.cache_hits} cache hits"
)
if metric.memory_usage:
message += f", {metric.memory_usage:.2f}MB memory delta"
# Log as warning if performance is concerning
if metric.duration > 1.0 or metric.query_count > 10:
logger.warning(f"Performance concern: {message}")
else:
logger.info(f"Performance metric: {message}")
def get_performance_summary(self) -> Dict[str, Any]:
"""Get summary of all performance metrics."""
if not self.metrics:
return {'message': 'No metrics collected'}
durations = [m.duration for m in self.metrics]
query_counts = [m.query_count for m in self.metrics]
return {
'total_operations': len(self.metrics),
'duration_stats': {
'mean': statistics.mean(durations),
'median': statistics.median(durations),
'min': min(durations),
'max': max(durations),
'total': sum(durations)
},
'query_stats': {
'mean': statistics.mean(query_counts),
'median': statistics.median(query_counts),
'min': min(query_counts),
'max': max(query_counts),
'total': sum(query_counts)
},
'cache_stats': {
'total_hits': sum(m.cache_hits for m in self.metrics),
'total_misses': sum(m.cache_misses for m in self.metrics),
'hit_rate': self._calculate_cache_hit_rate()
},
'slowest_operations': self._get_slowest_operations(5),
'most_query_intensive': self._get_most_query_intensive(5)
}
def _calculate_cache_hit_rate(self) -> float:
"""Calculate overall cache hit rate."""
total_hits = sum(m.cache_hits for m in self.metrics)
total_requests = total_hits + sum(m.cache_misses for m in self.metrics)
return (total_hits / total_requests * 100) if total_requests > 0 else 0.0
def _get_slowest_operations(self, count: int) -> List[Dict[str, Any]]:
"""Get the slowest operations."""
sorted_metrics = sorted(self.metrics, key=lambda m: m.duration, reverse=True)
return [
{
'operation': m.operation,
'duration': m.duration,
'query_count': m.query_count,
'timestamp': m.timestamp.isoformat()
}
for m in sorted_metrics[:count]
]
def _get_most_query_intensive(self, count: int) -> List[Dict[str, Any]]:
"""Get operations with the most database queries."""
sorted_metrics = sorted(self.metrics, key=lambda m: m.query_count, reverse=True)
return [
{
'operation': m.operation,
'query_count': m.query_count,
'duration': m.duration,
'timestamp': m.timestamp.isoformat()
}
for m in sorted_metrics[:count]
]
class BenchmarkSuite:
"""
Comprehensive benchmarking suite for park listing performance.
"""
def __init__(self):
self.monitor = PerformanceMonitor()
self.factory = RequestFactory()
def run_autocomplete_benchmark(self, queries: Optional[List[str]] = None) -> Dict[str, Any]:
"""Benchmark autocomplete performance with various queries."""
if not queries:
queries = [
'Di', # Short query
'Disney', # Common brand
'Universal', # Another common brand
'Cedar Point', # Specific park
'California', # Location
'Roller', # Generic term
'Xyz123' # Non-existent query
]
results = []
for query in queries:
with self.monitor.measure_operation(f"autocomplete_{query}", query=query):
# Simulate autocomplete request
from apps.parks.views_autocomplete import ParkAutocompleteView
request = self.factory.get(f'/api/parks/autocomplete/?q={query}')
view = ParkAutocompleteView()
response = view.get(request)
results.append({
'query': query,
'status_code': response.status_code,
'response_time': self.monitor.metrics[-1].duration,
'query_count': self.monitor.metrics[-1].query_count
})
return {
'benchmark_type': 'autocomplete',
'queries_tested': len(queries),
'results': results,
'summary': self.monitor.get_performance_summary()
}
def run_listing_benchmark(self, scenarios: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]:
"""Benchmark park listing performance with various filter scenarios."""
if not scenarios:
scenarios = [
{'name': 'no_filters', 'params': {}},
{'name': 'status_filter', 'params': {'status': 'OPERATING'}},
{'name': 'operator_filter', 'params': {'operator': 'Disney'}},
{'name': 'location_filter', 'params': {'country': 'United States'}},
{'name': 'complex_filter', 'params': {
'status': 'OPERATING',
'has_coasters': 'true',
'min_rating': '4.0'
}},
{'name': 'search_query', 'params': {'search': 'Magic Kingdom'}},
{'name': 'pagination_last_page', 'params': {'page': '10'}}
]
results = []
for scenario in scenarios:
with self.monitor.measure_operation(f"listing_{scenario['name']}", **scenario['params']):
# Simulate listing request
from apps.parks.views import ParkListView
query_string = '&'.join([f"{k}={v}" for k, v in scenario['params'].items()])
request = self.factory.get(f'/parks/?{query_string}')
view = ParkListView()
view.setup(request)
# Simulate getting the queryset and context
queryset = view.get_queryset()
view.object_list = queryset  # get_context_data() expects object_list to be set
context = view.get_context_data()
results.append({
'scenario': scenario['name'],
'params': scenario['params'],
'result_count': queryset.count() if hasattr(queryset, 'count') else len(queryset),
'response_time': self.monitor.metrics[-1].duration,
'query_count': self.monitor.metrics[-1].query_count
})
return {
'benchmark_type': 'listing',
'scenarios_tested': len(scenarios),
'results': results,
'summary': self.monitor.get_performance_summary()
}
def run_pagination_benchmark(self, page_sizes: Optional[List[int]] = None, page_numbers: Optional[List[int]] = None) -> Dict[str, Any]:
"""Benchmark pagination performance with different page sizes and numbers."""
if not page_sizes:
page_sizes = [10, 20, 50, 100]
if not page_numbers:
page_numbers = [1, 5, 10, 50]
results = []
for page_size in page_sizes:
for page_number in page_numbers:
scenario_name = f"page_{page_number}_size_{page_size}"
with self.monitor.measure_operation(scenario_name, page_size=page_size, page_number=page_number):
from apps.parks.services.pagination_service import get_optimized_page
from apps.parks.querysets import get_base_park_queryset
queryset = get_base_park_queryset()
page, metadata = get_optimized_page(queryset, page_number, page_size)
results.append({
'page_size': page_size,
'page_number': page_number,
'total_count': metadata.get('total_count', 0),
'response_time': self.monitor.metrics[-1].duration,
'query_count': self.monitor.metrics[-1].query_count
})
return {
'benchmark_type': 'pagination',
'configurations_tested': len(results),
'results': results,
'summary': self.monitor.get_performance_summary()
}
def run_full_benchmark_suite(self) -> Dict[str, Any]:
"""Run the complete benchmark suite."""
logger.info("Starting comprehensive benchmark suite")
suite_start = time.perf_counter()
# Run all benchmarks
autocomplete_results = self.run_autocomplete_benchmark()
listing_results = self.run_listing_benchmark()
pagination_results = self.run_pagination_benchmark()
suite_duration = time.perf_counter() - suite_start
# Generate comprehensive report
report = {
'benchmark_suite': 'Park Listing Performance',
'timestamp': datetime.now().isoformat(),
'total_duration': suite_duration,
'autocomplete': autocomplete_results,
'listing': listing_results,
'pagination': pagination_results,
'overall_summary': self.monitor.get_performance_summary(),
'recommendations': self._generate_recommendations()
}
# Save report
self._save_benchmark_report(report)
logger.info(f"Benchmark suite completed in {suite_duration:.3f}s")
return report
def _generate_recommendations(self) -> List[str]:
"""Generate performance recommendations based on benchmark results."""
recommendations = []
summary = self.monitor.get_performance_summary()
# Check average response times
if summary['duration_stats']['mean'] > 0.5:
recommendations.append("Average response time is high (>500ms). Consider implementing additional caching.")
# Check query counts
if summary['query_stats']['mean'] > 5:
recommendations.append("High average query count. Review and optimize database queries.")
# Check cache hit rate
if summary['cache_stats']['hit_rate'] < 80:
recommendations.append("Cache hit rate is low (<80%). Increase cache timeouts or improve cache key strategy.")
# Check for slow operations
slowest = summary.get('slowest_operations', [])
if slowest and slowest[0]['duration'] > 2.0:
recommendations.append(f"Slowest operation ({slowest[0]['operation']}) is very slow (>{slowest[0]['duration']:.2f}s).")
if not recommendations:
recommendations.append("Performance appears to be within acceptable ranges.")
return recommendations
def _save_benchmark_report(self, report: Dict[str, Any]):
"""Save benchmark report to file and cache."""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"benchmark_report_{timestamp}.json"
try:
# Save to logs directory
import os
logs_dir = "logs"
os.makedirs(logs_dir, exist_ok=True)
filepath = os.path.join(logs_dir, filename)
with open(filepath, 'w') as f:
json.dump(report, f, indent=2, default=str)
logger.info(f"Benchmark report saved to {filepath}")
# Also cache the report
cache.set(f"benchmark_report_latest", report, 3600) # 1 hour
except Exception as e:
logger.error(f"Error saving benchmark report: {e}")
# Global performance monitor instance
performance_monitor = PerformanceMonitor()
def benchmark_operation(operation_name: str):
"""Decorator to benchmark a function."""
def decorator(func: Callable):
def wrapper(*args, **kwargs):
with performance_monitor.measure_operation(operation_name):
return func(*args, **kwargs)
return wrapper
return decorator
# Convenience function to run benchmarks
def run_performance_benchmark():
"""Run the complete performance benchmark suite."""
suite = BenchmarkSuite()
return suite.run_full_benchmark_suite()
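A minimal usage sketch of the monitoring hooks defined above:
from apps.parks.services.performance_monitoring import (
    benchmark_operation,
    performance_monitor,
)
# Context-manager form: time a block and record query/cache stats.
with performance_monitor.measure_operation('filter_counts', source='docs'):
    pass  # run the code under test here
# Decorator form: wrap a function so every call is measured.
@benchmark_operation('load_parks')
def load_parks():
    pass
summary = performance_monitor.get_performance_summary()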

View File

@@ -0,0 +1,363 @@
/* Performance-optimized CSS for park listing page */
/* Critical CSS that should be inlined */
.park-listing {
/* Use GPU acceleration for smooth animations */
transform: translateZ(0);
backface-visibility: hidden;
}
/* Lazy loading image styles */
img[data-src] {
background: linear-gradient(90deg, #f0f0f0 25%, #e0e0e0 50%, #f0f0f0 75%);
background-size: 200% 100%;
animation: shimmer 1.5s infinite;
transition: opacity 0.3s ease;
}
img.loading {
opacity: 0.7;
filter: blur(2px);
}
img.loaded {
opacity: 1;
filter: none;
animation: none;
}
img.error {
background: #f5f5f5;
opacity: 0.5;
}
@keyframes shimmer {
0% {
background-position: -200% 0;
}
100% {
background-position: 200% 0;
}
}
/* Optimized grid layout using CSS Grid */
.park-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
gap: 1.5rem;
/* Use containment for better performance */
contain: layout style;
}
.park-card {
/* Optimize for animations */
will-change: transform, box-shadow;
transition: transform 0.2s ease, box-shadow 0.2s ease;
/* Enable GPU acceleration */
transform: translateZ(0);
/* Optimize paint */
contain: layout style paint;
}
.park-card:hover {
transform: translateY(-4px) translateZ(0);
box-shadow: 0 8px 25px rgba(0, 0, 0, 0.15);
}
/* Efficient loading states */
.loading {
position: relative;
overflow: hidden;
}
.loading::after {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: linear-gradient(
90deg,
transparent,
rgba(255, 255, 255, 0.4),
transparent
);
animation: loading-sweep 1.5s infinite;
pointer-events: none;
}
@keyframes loading-sweep {
0% {
transform: translateX(-100%);
}
100% {
transform: translateX(100%);
}
}
/* Optimized autocomplete dropdown */
.autocomplete-suggestions {
position: absolute;
top: 100%;
left: 0;
right: 0;
background: white;
border: 1px solid #ddd;
border-top: none;
border-radius: 0 0 4px 4px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
z-index: 1000;
max-height: 300px;
overflow-y: auto;
/* Hide by default */
opacity: 0;
visibility: hidden;
transform: translateY(-10px);
transition: all 0.2s ease;
/* Optimize scrolling */
-webkit-overflow-scrolling: touch;
contain: layout style;
}
.autocomplete-suggestions.visible {
opacity: 1;
visibility: visible;
transform: translateY(0);
}
.suggestion-item {
display: flex;
align-items: center;
padding: 0.75rem 1rem;
cursor: pointer;
border-bottom: 1px solid #f0f0f0;
transition: background-color 0.15s ease;
}
.suggestion-item:hover,
.suggestion-item.active {
background-color: #f8f9fa;
}
.suggestion-icon {
margin-right: 0.5rem;
font-size: 0.875rem;
}
.suggestion-name {
font-weight: 500;
flex-grow: 1;
}
.suggestion-details {
font-size: 0.875rem;
color: #666;
}
/* Optimized filter panel */
.filter-panel {
/* Use flexbox for efficient layout */
display: flex;
flex-wrap: wrap;
gap: 1rem;
padding: 1rem;
background: #f8f9fa;
border-radius: 8px;
/* Optimize for frequent updates */
contain: layout style;
}
.filter-group {
display: flex;
flex-direction: column;
min-width: 150px;
}
.filter-input {
padding: 0.5rem;
border: 1px solid #ddd;
border-radius: 4px;
transition: border-color 0.15s ease;
}
.filter-input:focus {
outline: none;
border-color: #007bff;
box-shadow: 0 0 0 2px rgba(0, 123, 255, 0.25);
}
/* Performance-optimized pagination */
.pagination {
display: flex;
justify-content: center;
align-items: center;
gap: 0.5rem;
margin: 2rem 0;
/* Optimize for position changes */
contain: layout;
}
.pagination-btn {
padding: 0.5rem 1rem;
border: 1px solid #ddd;
background: white;
color: #333;
text-decoration: none;
border-radius: 4px;
transition: all 0.15s ease;
/* Optimize for hover effects */
will-change: background-color, border-color;
}
.pagination-btn:hover:not(.disabled) {
background: #f8f9fa;
border-color: #bbb;
}
.pagination-btn.active {
background: #007bff;
color: white;
border-color: #007bff;
}
.pagination-btn.disabled {
opacity: 0.5;
cursor: not-allowed;
}
/* Responsive optimizations */
@media (max-width: 768px) {
.park-grid {
grid-template-columns: 1fr;
gap: 1rem;
}
.filter-panel {
flex-direction: column;
}
.suggestion-item {
padding: 1rem;
}
}
/* High DPI optimizations */
@media (-webkit-min-device-pixel-ratio: 2), (min-resolution: 192dpi) {
.park-card img {
/* Sharpen downscaled images on high-DPI displays */
image-rendering: -webkit-optimize-contrast;
}
}
/* Reduce motion for accessibility */
@media (prefers-reduced-motion: reduce) {
*,
*::before,
*::after {
animation-duration: 0.01ms !important;
animation-iteration-count: 1 !important;
transition-duration: 0.01ms !important;
scroll-behavior: auto !important;
}
}
/* Performance debugging styles (only in development) */
.debug-metrics {
position: fixed;
top: 10px;
right: 10px;
background: rgba(0, 0, 0, 0.8);
color: white;
padding: 0.5rem;
border-radius: 4px;
font-size: 0.75rem;
font-family: monospace;
z-index: 9999;
display: none;
}
body.debug .debug-metrics {
display: block;
}
.debug-metrics span {
display: block;
margin-bottom: 0.25rem;
}
/* Print optimizations */
@media print {
.autocomplete-suggestions,
.filter-panel,
.pagination,
.debug-metrics {
display: none;
}
.park-grid {
grid-template-columns: repeat(2, 1fr);
gap: 1rem;
}
.park-card {
break-inside: avoid;
page-break-inside: avoid;
}
}
/* Container queries for better responsive design */
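/* Note: these rules only match inside an ancestor that declares a container,
   e.g. .park-grid { container-type: inline-size; } (assumed; not set above). */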
@container (max-width: 400px) {
.park-card {
padding: 1rem;
}
.park-card img {
height: 150px;
}
}
/* Focus management for better accessibility */
.skip-link {
position: absolute;
top: -40px;
left: 6px;
background: #000;
color: white;
padding: 8px;
text-decoration: none;
border-radius: 4px;
z-index: 10000;
}
.skip-link:focus {
top: 6px;
}
/* Efficient animations using transform and opacity only */
.fade-in {
animation: fadeIn 0.3s ease-in-out;
}
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
/* Optimize for critical rendering path */
.above-fold {
/* Ensure critical content renders first */
contain: layout style paint;
}
.below-fold {
/* Defer non-critical content */
content-visibility: auto;
contain-intrinsic-size: 500px;
}

View File

@@ -0,0 +1,518 @@
/**
* Performance-optimized JavaScript for park listing page
* Implements lazy loading, debouncing, and efficient DOM manipulation
*/
class ParkListingPerformance {
constructor() {
this.searchTimeout = null;
this.lastScrollPosition = 0;
this.observerOptions = {
root: null,
rootMargin: '50px',
threshold: 0.1
};
this.init();
}
init() {
this.setupLazyLoading();
this.setupDebouncedSearch();
this.setupOptimizedFiltering();
this.setupProgressiveImageLoading();
this.setupPerformanceMonitoring();
}
/**
* Setup lazy loading for park images using Intersection Observer
*/
setupLazyLoading() {
if ('IntersectionObserver' in window) {
this.imageObserver = new IntersectionObserver((entries) => {
entries.forEach(entry => {
if (entry.isIntersecting) {
this.loadImage(entry.target);
this.imageObserver.unobserve(entry.target);
}
});
}, this.observerOptions);
// Observe all lazy images
document.querySelectorAll('img[data-src]').forEach(img => {
this.imageObserver.observe(img);
});
} else {
// Fallback for browsers without Intersection Observer
this.loadAllImages();
}
}
/**
* Load individual image with error handling and placeholder
*/
loadImage(img) {
const src = img.dataset.src;
const placeholder = img.dataset.placeholder;
// Start with low quality placeholder
if (placeholder && !img.src) {
img.src = placeholder;
img.classList.add('loading');
}
// Load high quality image
const highQualityImg = new Image();
highQualityImg.onload = () => {
img.src = highQualityImg.src;
img.classList.remove('loading');
img.classList.add('loaded');
};
highQualityImg.onerror = () => {
img.src = '/static/images/placeholders/park-placeholder.jpg';
img.classList.add('error');
};
highQualityImg.src = src;
}
/**
* Load all images (fallback for older browsers)
*/
loadAllImages() {
document.querySelectorAll('img[data-src]').forEach(img => {
this.loadImage(img);
});
}
/**
* Setup debounced search to reduce API calls
*/
setupDebouncedSearch() {
const searchInput = document.querySelector('[data-autocomplete]');
if (!searchInput) return;
searchInput.addEventListener('input', (e) => {
clearTimeout(this.searchTimeout);
const query = e.target.value.trim();
if (query.length < 2) {
this.hideSuggestions();
return;
}
// Debounce search requests
this.searchTimeout = setTimeout(() => {
this.performSearch(query);
}, 300);
});
// Handle keyboard navigation
searchInput.addEventListener('keydown', (e) => {
this.handleSearchKeyboard(e);
});
}
/**
* Perform optimized search with caching
*/
async performSearch(query) {
const cacheKey = `search_${query.toLowerCase()}`;
// Check session storage for cached results
const cached = sessionStorage.getItem(cacheKey);
if (cached) {
const results = JSON.parse(cached);
this.displaySuggestions(results);
return;
}
try {
const response = await fetch(`/api/parks/autocomplete/?q=${encodeURIComponent(query)}`, {
headers: {
'X-Requested-With': 'XMLHttpRequest'
}
});
if (response.ok) {
const data = await response.json();
// Cache results for session
sessionStorage.setItem(cacheKey, JSON.stringify(data));
this.displaySuggestions(data);
}
} catch (error) {
console.error('Search error:', error);
this.hideSuggestions();
}
}
/**
* Display search suggestions with efficient DOM manipulation
*/
displaySuggestions(data) {
const container = document.querySelector('[data-suggestions]');
if (!container) return;
// Use document fragment for efficient DOM updates
const fragment = document.createDocumentFragment();
if (data.suggestions && data.suggestions.length > 0) {
data.suggestions.forEach(suggestion => {
const item = this.createSuggestionItem(suggestion);
fragment.appendChild(item);
});
} else {
const noResults = document.createElement('div');
noResults.className = 'no-results';
noResults.textContent = 'No suggestions found';
fragment.appendChild(noResults);
}
// Replace content efficiently
container.innerHTML = '';
container.appendChild(fragment);
container.classList.add('visible');
}
/**
* Create suggestion item element
*/
createSuggestionItem(suggestion) {
const item = document.createElement('div');
item.className = `suggestion-item suggestion-${suggestion.type}`;
const icon = this.getSuggestionIcon(suggestion.type);
const details = suggestion.operator ? `${suggestion.operator}` :
suggestion.park_count ? `${suggestion.park_count} parks` : '';
item.innerHTML = `
<span class="suggestion-icon">${icon}</span>
<span class="suggestion-name">${this.escapeHtml(suggestion.name)}</span>
<span class="suggestion-details">${details}</span>
`;
item.addEventListener('click', () => {
this.selectSuggestion(suggestion);
});
return item;
}
/**
* Get icon for suggestion type
*/
getSuggestionIcon(type) {
const icons = {
park: '🏰',
operator: '🏢',
location: '📍'
};
return icons[type] || '🔍';
}
/**
* Handle suggestion selection
*/
selectSuggestion(suggestion) {
const searchInput = document.querySelector('[data-autocomplete]');
if (searchInput) {
searchInput.value = suggestion.name;
// Trigger search or navigation
if (suggestion.url) {
window.location.href = suggestion.url;
} else {
// Trigger filter update
this.updateFilters({ search: suggestion.name });
}
}
this.hideSuggestions();
}
/**
* Hide suggestions dropdown
*/
hideSuggestions() {
const container = document.querySelector('[data-suggestions]');
if (container) {
container.classList.remove('visible');
}
}
/**
* Setup optimized filtering with minimal reflows
*/
setupOptimizedFiltering() {
const filterForm = document.querySelector('[data-filter-form]');
if (!filterForm) return;
// Debounce filter changes
filterForm.addEventListener('change', (e) => {
clearTimeout(this.filterTimeout);
this.filterTimeout = setTimeout(() => {
this.updateFilters();
}, 150);
});
}
/**
* Update filters using HTMX with loading states
*/
updateFilters(extraParams = {}) {
const form = document.querySelector('[data-filter-form]');
const resultsContainer = document.querySelector('[data-results]');
if (!form || !resultsContainer) return;
// Show loading state
resultsContainer.classList.add('loading');
const formData = new FormData(form);
// Add extra parameters
Object.entries(extraParams).forEach(([key, value]) => {
formData.set(key, value);
});
// Use HTMX for efficient partial updates
if (window.htmx) {
htmx.ajax('GET', form.action + '?' + new URLSearchParams(formData), {
target: '[data-results]',
swap: 'innerHTML'
}).then(() => {
resultsContainer.classList.remove('loading');
this.setupLazyLoading(); // Re-initialize for new content
this.updatePerformanceMetrics();
});
}
}
/**
* Setup progressive image loading with CloudFlare optimization
*/
setupProgressiveImageLoading() {
// Use CloudFlare's automatic image optimization
document.querySelectorAll('img[data-cf-image]').forEach(img => {
const imageId = img.dataset.cfImage;
const width = img.dataset.width || 400;
// Start with low quality
img.src = this.getCloudFlareImageUrl(imageId, width, 'low');
// Load high quality when in viewport
if (this.imageObserver) {
this.imageObserver.observe(img);
}
});
}
/**
* Get optimized CloudFlare image URL
*/
getCloudFlareImageUrl(imageId, width, quality = 'high') {
const baseUrl = window.CLOUDFLARE_IMAGES_BASE_URL || '/images';
const qualityMap = {
low: 20,
medium: 60,
high: 85
};
return `${baseUrl}/${imageId}/w=${width},quality=${qualityMap[quality]}`;
}
/**
* Setup performance monitoring
*/
setupPerformanceMonitoring() {
// Track page load performance
if ('performance' in window) {
window.addEventListener('load', () => {
setTimeout(() => {
this.reportPerformanceMetrics();
}, 100);
});
}
// Track user interactions
this.setupInteractionTracking();
}
/**
* Report performance metrics
*/
reportPerformanceMetrics() {
if (!('performance' in window)) return;
const navigation = performance.getEntriesByType('navigation')[0];
const paint = performance.getEntriesByType('paint');
const metrics = {
// Measure from navigation start so these reflect full page timings
loadTime: navigation.loadEventEnd - navigation.startTime,
domContentLoaded: navigation.domContentLoadedEventEnd - navigation.startTime,
firstPaint: paint.find(p => p.name === 'first-paint')?.startTime || 0,
firstContentfulPaint: paint.find(p => p.name === 'first-contentful-paint')?.startTime || 0,
timestamp: Date.now(),
page: 'park-listing'
};
// Send metrics to analytics (if configured)
this.sendAnalytics('performance', metrics);
}
/**
* Setup interaction tracking for performance insights
*/
setupInteractionTracking() {
const startTime = performance.now();
['click', 'input', 'scroll'].forEach(eventType => {
document.addEventListener(eventType, (e) => {
this.trackInteraction(eventType, e.target, performance.now() - startTime);
}, { passive: true });
});
}
/**
* Track user interactions
*/
trackInteraction(type, target, time) {
// Throttle interaction tracking
if (!this.lastInteractionTime || time - this.lastInteractionTime > 100) {
this.lastInteractionTime = time;
const interaction = {
type,
element: target.tagName.toLowerCase(),
class: target.className,
time: Math.round(time),
page: 'park-listing'
};
this.sendAnalytics('interaction', interaction);
}
}
/**
* Send analytics data
*/
sendAnalytics(event, data) {
// Only send in production and if analytics is configured
if (window.ENABLE_ANALYTICS && navigator.sendBeacon) {
const payload = JSON.stringify({
event,
data,
timestamp: Date.now(),
url: window.location.pathname
});
navigator.sendBeacon('/api/analytics/', payload);
}
}
/**
* Update performance metrics display
*/
updatePerformanceMetrics() {
const metricsDisplay = document.querySelector('[data-performance-metrics]');
if (!metricsDisplay || !window.SHOW_DEBUG) return;
const imageCount = document.querySelectorAll('img').length;
const loadedImages = document.querySelectorAll('img.loaded').length;
const cacheHits = Object.keys(sessionStorage).filter(k => k.startsWith('search_')).length;
metricsDisplay.innerHTML = `
<div class="debug-metrics">
<span>Images: ${loadedImages}/${imageCount}</span>
<span>Cache hits: ${cacheHits}</span>
<span>Memory: ${this.getMemoryUsage()}MB</span>
</div>
`;
}
/**
* Get approximate memory usage
*/
getMemoryUsage() {
if ('memory' in performance) {
return Math.round(performance.memory.usedJSHeapSize / 1024 / 1024);
}
return 'N/A';
}
/**
* Handle keyboard navigation in search
*/
handleSearchKeyboard(e) {
const suggestions = document.querySelectorAll('.suggestion-item');
const active = document.querySelector('.suggestion-item.active');
switch (e.key) {
case 'ArrowDown':
e.preventDefault();
this.navigateSuggestions(suggestions, active, 1);
break;
case 'ArrowUp':
e.preventDefault();
this.navigateSuggestions(suggestions, active, -1);
break;
case 'Enter':
e.preventDefault();
if (active) {
active.click();
}
break;
case 'Escape':
this.hideSuggestions();
break;
}
}
/**
* Navigate through suggestions with keyboard
*/
navigateSuggestions(suggestions, active, direction) {
if (active) {
active.classList.remove('active');
}
let index = active ? Array.from(suggestions).indexOf(active) : -1;
index += direction;
if (index < 0) index = suggestions.length - 1;
if (index >= suggestions.length) index = 0;
if (suggestions[index]) {
suggestions[index].classList.add('active');
suggestions[index].scrollIntoView({ block: 'nearest' });
}
}
/**
* Utility function to escape HTML
*/
escapeHtml(text) {
const div = document.createElement('div');
div.textContent = text;
return div.innerHTML;
}
}
// Initialize performance optimizations when DOM is ready
if (document.readyState === 'loading') {
document.addEventListener('DOMContentLoaded', () => {
new ParkListingPerformance();
});
} else {
new ParkListingPerformance();
}
// Export for testing
if (typeof module !== 'undefined' && module.exports) {
module.exports = ParkListingPerformance;
}

View File

@@ -229,10 +229,16 @@ class ParkListView(HTMXFilterableMixin, ListView):
context_object_name = "parks"
filter_class = ParkFilter
paginate_by = 20
# Use optimized pagination
paginator_class = None # Will be set dynamically
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.filter_service = ParkFilterService()
# Import here to avoid circular imports
from .services.pagination_service import OptimizedPaginator
self.paginator_class = OptimizedPaginator
def get_template_names(self) -> list[str]:
"""Return park_list.html for HTMX requests"""
@@ -246,54 +252,35 @@ class ParkListView(HTMXFilterableMixin, ListView):
def get_queryset(self) -> QuerySet[Park]:
"""Get optimized queryset with enhanced filtering and proper relations"""
from apps.core.utils.query_optimization import monitor_db_performance
try:
# Start with optimized base queryset
queryset = (
get_base_park_queryset()
.select_related(
'operator',
'property_owner',
'location',
'banner_image',
'card_image'
)
.prefetch_related(
'photos',
'rides__manufacturer',
'areas'
)
)
# Use filter service for enhanced filtering
filter_params = self._get_clean_filter_params()
# Apply ordering
ordering = self.request.GET.get('ordering', 'name')
if ordering:
# Validate ordering to prevent SQL injection
valid_orderings = [
'name', '-name',
'average_rating', '-average_rating',
'coaster_count', '-coaster_count',
'ride_count', '-ride_count',
'opening_date', '-opening_date'
]
if ordering in valid_orderings:
queryset = queryset.order_by(ordering)
else:
queryset = queryset.order_by('name') # Default fallback
# Apply other filters through service
filtered_queryset = self.filter_service.get_filtered_queryset(filter_params)
# Combine with optimized queryset maintaining the optimizations
final_queryset = queryset.filter(
pk__in=filtered_queryset.values_list('pk', flat=True)
)
# Create filterset for form rendering
self.filterset = self.filter_class(self.request.GET, queryset=final_queryset)
return self.filterset.qs
with monitor_db_performance("park_list_queryset"):
# Get clean filter parameters
filter_params = self._get_clean_filter_params()
# Use filter service to build optimized queryset directly
# This eliminates the expensive pk__in subquery anti-pattern
queryset = self.filter_service.get_optimized_filtered_queryset(filter_params)
# Apply ordering with validation
ordering = self.request.GET.get('ordering', 'name')
if ordering:
valid_orderings = [
'name', '-name',
'average_rating', '-average_rating',
'coaster_count', '-coaster_count',
'ride_count', '-ride_count',
'opening_date', '-opening_date'
]
if ordering in valid_orderings:
queryset = queryset.order_by(ordering)
else:
queryset = queryset.order_by('name') # Default fallback
# Create filterset for form rendering
self.filterset = self.filter_class(self.request.GET, queryset=queryset)
return self.filterset.qs
except Exception as e:
messages.error(self.request, f"Error loading parks: {str(e)}")