feat: Refactor rides app with unique constraints, mixins, and enhanced documentation

- Added a migration converting RideModel's unique_together constraints to UniqueConstraint (a minimal sketch follows this list).
- Introduced RideFormMixin for handling entity suggestions in ride forms.
- Created comprehensive code standards documentation covering formatting rules, docstring conventions, complexity guidelines, and testing requirements.
- Established error handling guidelines with a structured exception hierarchy and best practices for API and view error handling.
- Documented view pattern guidelines, emphasizing when to use class-based views (CBVs), function-based views (FBVs), and ViewSets, with examples.
- Implemented a benchmarking script for query performance analysis and optimization.
- Developed security documentation detailing measures, configurations, and a security checklist.
- Compiled a database optimization guide covering indexing strategies, query optimization patterns, and computed fields.
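
For reference, a minimal sketch of the unique_together to UniqueConstraint conversion described above. The field names ("park", "slug"), the constraint name, and the migration dependency are illustrative placeholders, not the actual RideModel definition; the migration in this commit may differ:

    from django.db import migrations, models


    class Migration(migrations.Migration):
        dependencies = [
            ("rides", "0001_initial"),  # placeholder; not the real dependency
        ]

        operations = [
            # Drop the legacy unique_together option...
            migrations.AlterUniqueTogether(
                name="ridemodel",
                unique_together=set(),
            ),
            # ...and replace it with an equivalent named UniqueConstraint.
            migrations.AddConstraint(
                model_name="ridemodel",
                constraint=models.UniqueConstraint(
                    fields=["park", "slug"],
                    name="rides_ridemodel_park_slug_unique",
                ),
            ),
        ]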
pacnpal
2025-12-22 11:17:31 -05:00
parent 45d97b6e68
commit 2e35f8c5d9
71 changed files with 8036 additions and 1462 deletions

scripts/benchmark_queries.py

@@ -0,0 +1,204 @@
#!/usr/bin/env python
"""
Query Performance Benchmarking Script

This script measures query performance for key operations in the ThrillWiki
application to help identify optimization opportunities and verify improvements.

Usage:
    python manage.py shell < scripts/benchmark_queries.py

Or in a Django shell:
    exec(open('scripts/benchmark_queries.py').read())
"""
import time
import statistics
from functools import wraps
from typing import Callable, Any, List, Dict

from django.db import connection, reset_queries
from django.test.utils import CaptureQueriesContext
from django.conf import settings

# connection.queries is only populated when DEBUG is True, so warn if it's off.
if not settings.DEBUG:
    print("Warning: DEBUG mode is not enabled. Query counts may not be accurate.")

def benchmark(name: str, iterations: int = 5):
    """Decorator that times a function and counts its SQL queries over several runs."""
    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs) -> Dict[str, Any]:
            times = []
            query_counts = []
            for _ in range(iterations):
                reset_queries()
                with CaptureQueriesContext(connection) as context:
                    start = time.perf_counter()
                    func(*args, **kwargs)  # Return value is discarded; the call itself is what we measure
                    end = time.perf_counter()
                times.append((end - start) * 1000)  # Convert to ms
                query_counts.append(len(context.captured_queries))
            return {
                'name': name,
                'avg_time_ms': statistics.mean(times),
                'min_time_ms': min(times),
                'max_time_ms': max(times),
                'std_dev_ms': statistics.stdev(times) if len(times) > 1 else 0,
                'avg_queries': statistics.mean(query_counts),
                'min_queries': min(query_counts),
                'max_queries': max(query_counts),
                'iterations': iterations,
            }
        return wrapper
    return decorator

def print_benchmark_result(result: Dict[str, Any]) -> None:
    """Pretty-print a single benchmark result."""
    print(f"\n{'=' * 60}")
    print(f"Benchmark: {result['name']}")
    print(f"{'=' * 60}")
    print(f"  Time (ms):    avg={result['avg_time_ms']:.2f}, min={result['min_time_ms']:.2f}, max={result['max_time_ms']:.2f}")
    print(f"  Std Dev (ms): {result['std_dev_ms']:.2f}")
    print(f"  Queries:      avg={result['avg_queries']:.1f}, min={result['min_queries']}, max={result['max_queries']}")
    print(f"  Iterations:   {result['iterations']}")

def run_benchmarks() -> List[Dict[str, Any]]:
    """Run all benchmarks and return their results."""
    from apps.parks.models import Park, Company
    from apps.rides.models import Ride

    results = []

    # Park list, optimized queryset
    @benchmark("Park.objects.optimized_for_list()")
    def bench_park_list_optimized():
        parks = Park.objects.optimized_for_list()[:50]
        for park in parks:
            _ = park.operator
            _ = getattr(park, 'coaster_count_calculated', None)
        return list(parks)

    results.append(bench_park_list_optimized())

    # Park list, non-optimized (for comparison)
    @benchmark("Park.objects.all() (non-optimized)")
    def bench_park_list_non_optimized():
        parks = Park.objects.all()[:50]
        for park in parks:
            _ = park.operator  # This will cause N+1 queries
        return list(parks)

    results.append(bench_park_list_non_optimized())

    # Park detail, optimized queryset
    @benchmark("Park.objects.optimized_for_detail()")
    def bench_park_detail_optimized():
        park = Park.objects.optimized_for_detail().first()
        if park:
            _ = park.operator
            _ = list(park.areas.all())
            _ = list(park.rides.all())
        return park

    results.append(bench_park_detail_optimized())

    # Ride list, optimized queryset
    @benchmark("Ride.objects.optimized_for_list()")
    def bench_ride_list_optimized():
        rides = Ride.objects.optimized_for_list()[:50]
        for ride in rides:
            _ = ride.park
            _ = ride.manufacturer
        return list(rides)

    results.append(bench_ride_list_optimized())

    # Ride detail with coaster stats
    @benchmark("Ride.objects.with_coaster_stats()")
    def bench_ride_with_coaster_stats():
        rides = Ride.objects.with_coaster_stats()[:20]
        for ride in rides:
            _ = ride.park
            try:
                _ = ride.coaster_stats
            except Exception:
                pass  # Not every ride has coaster stats
        return list(rides)

    results.append(bench_ride_with_coaster_stats())

    # Manufacturers annotated with ride counts
    @benchmark("Company.objects.manufacturers_with_ride_count()")
    def bench_manufacturers_with_count():
        companies = Company.objects.manufacturers_with_ride_count()[:20]
        for company in companies:
            _ = company.ride_count
        return list(companies)

    results.append(bench_manufacturers_with_count())

    # Park search autocomplete
    @benchmark("Park search_autocomplete()")
    def bench_park_autocomplete():
        result = Park.objects.get_queryset().search_autocomplete(query="park", limit=10)
        return list(result)

    results.append(bench_park_autocomplete())

    # Park map display
    @benchmark("Park.objects.for_map_display()")
    def bench_park_map_display():
        result = Park.objects.for_map_display()
        return list(result)

    results.append(bench_park_map_display())

    return results

def print_summary(results: List[Dict[str, Any]]) -> None:
    """Print a summary table of all benchmark results."""
    print("\n" + "=" * 80)
    print("BENCHMARK SUMMARY")
    print("=" * 80)
    print(f"{'Benchmark':<45} {'Avg Time (ms)':<15} {'Avg Queries':<15}")
    print("-" * 80)
    for result in results:
        print(f"{result['name']:<45} {result['avg_time_ms']:<15.2f} {result['avg_queries']:<15.1f}")
    print("=" * 80)

# Run unconditionally: when this script is piped into `manage.py shell` or
# loaded via exec(), __name__ is not "__main__", so the usual guard would
# never fire.
print("\n" + "=" * 80)
print("THRILLWIKI QUERY PERFORMANCE BENCHMARKS")
print("=" * 80)
print("\nRunning benchmarks...")

try:
    results = run_benchmarks()

    # Print individual results
    for result in results:
        print_benchmark_result(result)

    # Print summary
    print_summary(results)

    print("\nBenchmarks complete!")
except Exception as e:
    print(f"\nError running benchmarks: {e}")
    import traceback
    traceback.print_exc()
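
For quick ad-hoc measurements, the benchmark decorator above can also wrap a one-off function in a Django shell. This usage sketch assumes the script has already been exec()'d so benchmark() and print_benchmark_result() are in scope:

    # Hypothetical ad-hoc usage; Park and its default manager are assumed.
    @benchmark("Park.objects.all()[:10]", iterations=3)
    def bench_quick():
        from apps.parks.models import Park
        return list(Park.objects.all()[:10])

    # The wrapper returns the stats dict, not the queryset result.
    print_benchmark_result(bench_quick())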