feat: Implement MFA authentication, add ride statistics model, and update various services, APIs, and tests across the application.

pacnpal
2025-12-28 17:32:53 -05:00
parent aa56c46c27
commit c95f99ca10
452 changed files with 7948 additions and 6073 deletions

scripts/benchmark_queries.py

@@ -12,15 +12,16 @@ Or in Django shell:
     exec(open('scripts/benchmark_queries.py').read())
 """
-import time
+import contextlib
+import statistics
+import time
+from collections.abc import Callable
 from functools import wraps
-from typing import Callable, Any, List, Dict
+from typing import Any

+from django.conf import settings
 from django.db import connection, reset_queries
 from django.test.utils import CaptureQueriesContext
-from django.conf import settings

 # Ensure debug mode for query logging
 if not settings.DEBUG:
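For context before the next hunk: Django only appends executed SQL to `connection.queries` when `settings.DEBUG` is true, which is why the script flips the flag before benchmarking. The body of this guard is truncated in the hunk above; a hedged sketch of what it presumably does:

    from django.conf import settings

    # Assumed body (not shown in the hunk): enable DEBUG so Django records
    # executed SQL on connection.queries for the benchmarks below.
    if not settings.DEBUG:
        settings.DEBUG = True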
@@ -31,7 +32,7 @@ def benchmark(name: str, iterations: int = 5):
     """Decorator to benchmark a function."""
     def decorator(func: Callable) -> Callable:
         @wraps(func)
-        def wrapper(*args, **kwargs) -> Dict[str, Any]:
+        def wrapper(*args, **kwargs) -> dict[str, Any]:
             times = []
             query_counts = []
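The annotation change above, repeated throughout this file, is the PEP 585 modernization: `typing.Dict`, `typing.List`, and `typing.Callable` give way to the built-in `dict`/`list` generics and `collections.abc.Callable`, available since Python 3.9. A minimal sketch of the pattern, with an illustrative function name:

    from collections.abc import Callable
    from typing import Any

    # Built-in generics (PEP 585) replace typing.Dict / typing.List.
    def best_time(rows: list[dict[str, Any]],
                  key: Callable[[dict[str, Any]], float]) -> float:
        """Illustrative helper: smallest metric across benchmark rows."""
        return min(key(row) for row in rows)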
@@ -40,7 +41,7 @@ def benchmark(name: str, iterations: int = 5):
             with CaptureQueriesContext(connection) as context:
                 start = time.perf_counter()
-                result = func(*args, **kwargs)
+                func(*args, **kwargs)
                 end = time.perf_counter()
                 times.append((end - start) * 1000)  # Convert to ms
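The measurement core of the wrapper pairs `time.perf_counter()` with Django's `CaptureQueriesContext`, which records every SQL statement issued on a connection inside the `with` block (the dropped `result =` binding was simply unused). A standalone sketch of the same pattern, with an illustrative helper name:

    import time

    from django.db import connection
    from django.test.utils import CaptureQueriesContext

    def time_once(fn, *args, **kwargs):
        """Run fn once; return (elapsed_ms, query_count). Illustrative helper."""
        with CaptureQueriesContext(connection) as ctx:
            start = time.perf_counter()
            fn(*args, **kwargs)
            elapsed_ms = (time.perf_counter() - start) * 1000
        return elapsed_ms, len(ctx.captured_queries)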
@@ -61,7 +62,7 @@ def benchmark(name: str, iterations: int = 5):
     return decorator

-def print_benchmark_result(result: Dict[str, Any]) -> None:
+def print_benchmark_result(result: dict[str, Any]) -> None:
     """Pretty print benchmark results."""
     print(f"\n{'='*60}")
     print(f"Benchmark: {result['name']}")
@@ -72,9 +73,9 @@ def print_benchmark_result(result: Dict[str, Any]) -> None:
     print(f" Iterations: {result['iterations']}")

-def run_benchmarks() -> List[Dict[str, Any]]:
+def run_benchmarks() -> list[dict[str, Any]]:
     """Run all benchmarks and return results."""
-    from apps.parks.models import Park, Company
+    from apps.parks.models import Company, Park
     from apps.rides.models import Ride

     results = []
@@ -129,10 +130,8 @@ def run_benchmarks() -> List[Dict[str, Any]]:
         rides = Ride.objects.with_coaster_stats()[:20]
         for ride in rides:
             _ = ride.park
-            try:
+            with contextlib.suppress(Exception):
                 _ = ride.coaster_stats
-            except Exception:
-                pass
         return list(rides)

     results.append(bench_ride_with_coaster_stats())
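The `contextlib.suppress` rewrite above is behavior-preserving: it swallows the listed exception types for the duration of the `with` block, exactly like the old `try/except/pass`. A self-contained sketch of the equivalence (the `_Ride` stand-in is illustrative):

    import contextlib

    class _Ride:
        @property
        def coaster_stats(self):
            raise AttributeError("stats not prefetched")  # illustrative failure

    ride = _Ride()

    # Before: explicit try/except/pass
    try:
        _ = ride.coaster_stats
    except Exception:
        pass

    # After: equivalent, one statement shorter
    with contextlib.suppress(Exception):
        _ = ride.coaster_stats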
@@ -166,7 +165,7 @@ def run_benchmarks() -> List[Dict[str, Any]]:
     return results

-def print_summary(results: List[Dict[str, Any]]) -> None:
+def print_summary(results: list[dict[str, Any]]) -> None:
     """Print a summary table of all benchmarks."""
     print("\n" + "="*80)
     print("BENCHMARK SUMMARY")
@@ -180,7 +179,7 @@ def print_summary(results: List[Dict[str, Any]]) -> None:
     print("="*80)

-if __name__ == "__main__" or True:  # Always run when executed
+if True:  # Always run when executed
     print("\n" + "="*80)
     print("THRILLWIKI QUERY PERFORMANCE BENCHMARKS")
     print("="*80)