Add comprehensive tests for Parks API and models

- Implemented extensive test cases for the Parks API, covering endpoints for listing, retrieving, creating, updating, and deleting parks.
- Added tests for filtering, searching, and ordering parks in the API.
- Created tests for error handling in the API, including malformed JSON and unsupported methods.
- Developed model tests for Park, ParkArea, Company, and ParkReview models, ensuring validation and constraints are enforced.
- Introduced utility mixins for API and model testing to streamline assertions and enhance test readability.
- Included integration tests to validate complete workflows involving park creation, retrieval, updating, and deletion (a minimal example of the test style is sketched below).
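For orientation, a minimal sketch of the style these tests follow, assuming the standardized response envelope produced by the API mixins added in this commit; the `/api/parks/` route is a hypothetical placeholder:

```python
from rest_framework import status
from rest_framework.test import APITestCase


class ParkListApiTests(APITestCase):
    def test_list_parks_returns_standard_envelope(self):
        # '/api/parks/' is a hypothetical route for this sketch.
        response = self.client.get("/api/parks/")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Successful responses are wrapped in the standard envelope.
        self.assertEqual(response.data["status"], "success")
        self.assertIn("data", response.data)
```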
pacnpal
2025-08-17 19:36:20 -04:00
parent 17228e9935
commit c26414ff74
210 changed files with 24155 additions and 833 deletions

1
core/api/__init__.py Normal file

@@ -0,0 +1 @@
# Core API infrastructure for ThrillWiki

172
core/api/exceptions.py Normal file

@@ -0,0 +1,172 @@
"""
Custom exception handling for ThrillWiki API.
Provides standardized error responses following Django styleguide patterns.
"""
import logging
from typing import Any, Dict, Optional
from django.http import Http404
from django.core.exceptions import PermissionDenied, ValidationError as DjangoValidationError
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import exception_handler
from rest_framework.exceptions import ValidationError as DRFValidationError, NotFound, PermissionDenied as DRFPermissionDenied
from ..exceptions import ThrillWikiException
from ..logging import get_logger, log_exception
logger = get_logger(__name__)
def custom_exception_handler(exc: Exception, context: Dict[str, Any]) -> Optional[Response]:
"""
Custom exception handler for DRF that provides standardized error responses.
Returns:
Response with standardized error format or None to fallback to default handler
"""
# Call REST framework's default exception handler first
response = exception_handler(exc, context)
if response is not None:
# Standardize the error response format
custom_response_data = {
'status': 'error',
'error': {
'code': _get_error_code(exc),
'message': _get_error_message(exc, response.data),
'details': _get_error_details(exc, response.data),
},
'data': None,
}
# Add request context for debugging
if hasattr(context.get('request'), 'user'):
custom_response_data['error']['request_user'] = str(context['request'].user)
# Log the error for monitoring
log_exception(logger, exc, context={'response_status': response.status_code}, request=context.get('request'))
response.data = custom_response_data
# Handle ThrillWiki custom exceptions
elif isinstance(exc, ThrillWikiException):
custom_response_data = {
'status': 'error',
'error': exc.to_dict(),
'data': None,
}
log_exception(logger, exc, context={'response_status': exc.status_code}, request=context.get('request'))
response = Response(custom_response_data, status=exc.status_code)
# Handle specific Django exceptions that DRF doesn't catch
elif isinstance(exc, DjangoValidationError):
custom_response_data = {
'status': 'error',
'error': {
'code': 'VALIDATION_ERROR',
'message': 'Validation failed',
'details': _format_django_validation_errors(exc),
},
'data': None,
}
log_exception(logger, exc, context={'response_status': status.HTTP_400_BAD_REQUEST}, request=context.get('request'))
response = Response(custom_response_data, status=status.HTTP_400_BAD_REQUEST)
elif isinstance(exc, Http404):
custom_response_data = {
'status': 'error',
'error': {
'code': 'NOT_FOUND',
'message': 'Resource not found',
'details': str(exc) if str(exc) else None,
},
'data': None,
}
log_exception(logger, exc, context={'response_status': status.HTTP_404_NOT_FOUND}, request=context.get('request'))
response = Response(custom_response_data, status=status.HTTP_404_NOT_FOUND)
elif isinstance(exc, PermissionDenied):
custom_response_data = {
'status': 'error',
'error': {
'code': 'PERMISSION_DENIED',
'message': 'Permission denied',
'details': str(exc) if str(exc) else None,
},
'data': None,
}
log_exception(logger, exc, context={'response_status': status.HTTP_403_FORBIDDEN}, request=context.get('request'))
response = Response(custom_response_data, status=status.HTTP_403_FORBIDDEN)
return response
def _get_error_code(exc: Exception) -> str:
"""Extract or determine error code from exception."""
if hasattr(exc, 'default_code'):
return exc.default_code.upper()
if isinstance(exc, DRFValidationError):
return 'VALIDATION_ERROR'
elif isinstance(exc, NotFound):
return 'NOT_FOUND'
elif isinstance(exc, DRFPermissionDenied):
return 'PERMISSION_DENIED'
return exc.__class__.__name__.upper()
def _get_error_message(exc: Exception, response_data: Any) -> str:
"""Extract user-friendly error message."""
if isinstance(response_data, dict):
# Handle DRF validation errors
if 'detail' in response_data:
return str(response_data['detail'])
elif 'non_field_errors' in response_data:
errors = response_data['non_field_errors']
return str(errors[0]) if isinstance(errors, list) and errors else str(errors)
elif len(response_data) == 1:
key, value = next(iter(response_data.items()))
if isinstance(value, list) and value:
return f"{key}: {value[0]}"
return f"{key}: {value}"
# Fallback to exception message
return str(exc) if str(exc) else 'An error occurred'
def _get_error_details(exc: Exception, response_data: Any) -> Optional[Dict[str, Any]]:
"""Extract detailed error information for debugging."""
if isinstance(response_data, dict) and len(response_data) > 1:
return response_data
if hasattr(exc, 'detail') and isinstance(exc.detail, dict):
return exc.detail
return None
def _format_django_validation_errors(exc: DjangoValidationError) -> Dict[str, Any]:
"""Format Django ValidationError for API response."""
if hasattr(exc, 'error_dict'):
# Field-specific errors
return {
field: [str(error) for error in errors]
for field, errors in exc.error_dict.items()
}
elif hasattr(exc, 'error_list'):
# Non-field errors
return {
'non_field_errors': [str(error) for error in exc.error_list]
}
return {'non_field_errors': [str(exc)]}
# Removed _log_api_error - using centralized logging instead
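The handler above only takes effect once it is wired into DRF's settings; a minimal sketch:

```python
# settings.py (sketch)
REST_FRAMEWORK = {
    "EXCEPTION_HANDLER": "core.api.exceptions.custom_exception_handler",
}
```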

252
core/api/mixins.py Normal file

@@ -0,0 +1,252 @@
"""
Common mixins for API views following Django styleguide patterns.
"""
from typing import Dict, Any, Optional
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework import status
class ApiMixin:
"""
Base mixin for API views providing standardized response formatting.
"""
def create_response(
self,
*,
data: Any = None,
message: Optional[str] = None,
status_code: int = status.HTTP_200_OK,
pagination: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None
) -> Response:
"""
Create standardized API response.
Args:
data: Response data
message: Optional success message
status_code: HTTP status code
pagination: Pagination information
metadata: Additional metadata
Returns:
Standardized Response object
"""
response_data = {
'status': 'success' if status_code < 400 else 'error',
'data': data,
}
if message:
response_data['message'] = message
if pagination:
response_data['pagination'] = pagination
if metadata:
response_data['metadata'] = metadata
return Response(response_data, status=status_code)
def create_error_response(
self,
*,
message: str,
status_code: int = status.HTTP_400_BAD_REQUEST,
error_code: Optional[str] = None,
details: Optional[Dict[str, Any]] = None
) -> Response:
"""
Create standardized error response.
Args:
message: Error message
status_code: HTTP status code
error_code: Optional error code
details: Additional error details
Returns:
Standardized error Response object
"""
error_data = {
'code': error_code or 'GENERIC_ERROR',
'message': message,
}
if details:
error_data['details'] = details
response_data = {
'status': 'error',
'error': error_data,
'data': None,
}
return Response(response_data, status=status_code)
class CreateApiMixin(ApiMixin):
"""
Mixin for create API endpoints with standardized input/output handling.
"""
def create(self, request: Request, *args, **kwargs) -> Response:
"""Handle POST requests for creating resources."""
serializer = self.get_input_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
# Create the object using the service layer
obj = self.perform_create(**serializer.validated_data)
# Serialize the output
output_serializer = self.get_output_serializer(obj)
return self.create_response(
data=output_serializer.data,
status_code=status.HTTP_201_CREATED,
message="Resource created successfully"
)
def perform_create(self, **validated_data):
"""
Override this method to implement object creation logic.
Should use service layer methods.
"""
raise NotImplementedError("Subclasses must implement perform_create")
def get_input_serializer(self, *args, **kwargs):
"""Get the input serializer for validation."""
return self.InputSerializer(*args, **kwargs)
def get_output_serializer(self, *args, **kwargs):
"""Get the output serializer for response."""
return self.OutputSerializer(*args, **kwargs)
class UpdateApiMixin(ApiMixin):
"""
Mixin for update API endpoints with standardized input/output handling.
"""
def update(self, request: Request, *args, **kwargs) -> Response:
"""Handle PUT/PATCH requests for updating resources."""
instance = self.get_object()
serializer = self.get_input_serializer(data=request.data, partial=kwargs.get('partial', False))
serializer.is_valid(raise_exception=True)
# Update the object using the service layer
updated_obj = self.perform_update(instance, **serializer.validated_data)
# Serialize the output
output_serializer = self.get_output_serializer(updated_obj)
return self.create_response(
data=output_serializer.data,
message="Resource updated successfully"
)
def perform_update(self, instance, **validated_data):
"""
Override this method to implement object update logic.
Should use service layer methods.
"""
raise NotImplementedError("Subclasses must implement perform_update")
def get_input_serializer(self, *args, **kwargs):
"""Get the input serializer for validation."""
return self.InputSerializer(*args, **kwargs)
def get_output_serializer(self, *args, **kwargs):
"""Get the output serializer for response."""
return self.OutputSerializer(*args, **kwargs)
class ListApiMixin(ApiMixin):
"""
Mixin for list API endpoints with pagination and filtering.
"""
def list(self, request: Request, *args, **kwargs) -> Response:
"""Handle GET requests for listing resources."""
# Use selector to get filtered queryset
queryset = self.get_queryset()
# Apply pagination
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_output_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
# No pagination
serializer = self.get_output_serializer(queryset, many=True)
return self.create_response(data=serializer.data)
def get_queryset(self):
"""
Override this method to use selector patterns.
Should call selector functions, not access model managers directly.
"""
raise NotImplementedError("Subclasses must implement get_queryset using selectors")
def get_output_serializer(self, *args, **kwargs):
"""Get the output serializer for response."""
return self.OutputSerializer(*args, **kwargs)
class RetrieveApiMixin(ApiMixin):
"""
Mixin for retrieve API endpoints.
"""
def retrieve(self, request: Request, *args, **kwargs) -> Response:
"""Handle GET requests for retrieving a single resource."""
instance = self.get_object()
serializer = self.get_output_serializer(instance)
return self.create_response(data=serializer.data)
def get_object(self):
"""
Override this method to use selector patterns.
Should call selector functions for optimized queries.
"""
raise NotImplementedError("Subclasses must implement get_object using selectors")
def get_output_serializer(self, *args, **kwargs):
"""Get the output serializer for response."""
return self.OutputSerializer(*args, **kwargs)
class DestroyApiMixin(ApiMixin):
"""
Mixin for delete API endpoints.
"""
def destroy(self, request: Request, *args, **kwargs) -> Response:
"""Handle DELETE requests for destroying resources."""
instance = self.get_object()
# Delete using service layer
self.perform_destroy(instance)
# A 204 response must not carry a body, so return an empty response
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_destroy(self, instance):
"""
Override this method to implement object deletion logic.
Should use service layer methods.
"""
raise NotImplementedError("Subclasses must implement perform_destroy")
def get_object(self):
"""
Override this method to use selector patterns.
Should call selector functions for optimized queries.
"""
raise NotImplementedError("Subclasses must implement get_object using selectors")


@@ -0,0 +1 @@
# Decorators module


@@ -0,0 +1,343 @@
"""
Advanced caching decorators for API views and functions.
"""
import hashlib
import json
import time
from functools import wraps
from typing import Optional, List, Callable, Any
from django.core.cache import cache
from django.http import JsonResponse
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control, never_cache
from django.views.decorators.vary import vary_on_headers
from rest_framework.response import Response
from core.services.enhanced_cache_service import EnhancedCacheService
import logging
logger = logging.getLogger(__name__)
def cache_api_response(timeout=1800, vary_on=None, key_prefix='api', cache_backend='api'):
"""
Advanced decorator for caching API responses with flexible configuration
Args:
timeout: Cache timeout in seconds
vary_on: List of request attributes to vary cache on
key_prefix: Prefix for cache keys
cache_backend: Cache backend to use
"""
def decorator(view_func):
@wraps(view_func)
def wrapper(self, request, *args, **kwargs):
# Only cache GET requests
if request.method != 'GET':
return view_func(self, request, *args, **kwargs)
# Generate cache key based on view, user, and parameters
cache_key_parts = [
key_prefix,
view_func.__name__,
str(request.user.id) if request.user.is_authenticated else 'anonymous',
str(hash(frozenset(request.GET.items()))),
]
# Add URL parameters to cache key
if args:
cache_key_parts.append(str(hash(args)))
if kwargs:
cache_key_parts.append(str(hash(frozenset(kwargs.items()))))
# Add custom vary_on fields
if vary_on:
for field in vary_on:
value = getattr(request, field, '')
cache_key_parts.append(str(value))
cache_key = ':'.join(cache_key_parts)
# Try to get from cache
cache_service = EnhancedCacheService()
cached_response = getattr(cache_service, cache_backend + '_cache').get(cache_key)
if cached_response:
logger.debug(f"Cache hit for API view {view_func.__name__}", extra={
'cache_key': cache_key,
'view': view_func.__name__,
'cache_hit': True
})
return cached_response
# Execute view and cache result
start_time = time.time()
response = view_func(self, request, *args, **kwargs)
execution_time = time.time() - start_time
# Only cache successful responses
if hasattr(response, 'status_code') and response.status_code == 200:
# DRF responses must be rendered before they can be pickled into the cache
if hasattr(response, 'render') and callable(response.render):
response.render()
getattr(cache_service, cache_backend + '_cache').set(cache_key, response, timeout)
logger.debug(f"Cached API response for view {view_func.__name__}", extra={
'cache_key': cache_key,
'view': view_func.__name__,
'execution_time': execution_time,
'cache_timeout': timeout,
'cache_miss': True
})
else:
logger.debug(f"Not caching response for view {view_func.__name__} (status: {getattr(response, 'status_code', 'unknown')})")
return response
return wrapper
return decorator
def cache_queryset_result(cache_key_template: str, timeout: int = 3600, cache_backend='default'):
"""
Decorator for caching expensive queryset operations
Args:
cache_key_template: Template for cache key (can use format placeholders)
timeout: Cache timeout in seconds
cache_backend: Cache backend to use
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Generate cache key from template and arguments
try:
cache_key = cache_key_template.format(*args, **kwargs)
except (KeyError, IndexError):
# Fallback to simpler key generation
cache_key = f"{cache_key_template}:{hash(str(args) + str(kwargs))}"
cache_service = EnhancedCacheService()
cached_result = getattr(cache_service, cache_backend + '_cache').get(cache_key)
if cached_result is not None:
logger.debug(f"Cache hit for queryset operation: {func.__name__}")
return cached_result
# Execute function and cache result
start_time = time.time()
result = func(*args, **kwargs)
execution_time = time.time() - start_time
getattr(cache_service, cache_backend + '_cache').set(cache_key, result, timeout)
logger.debug(f"Cached queryset result for {func.__name__}", extra={
'cache_key': cache_key,
'function': func.__name__,
'execution_time': execution_time,
'cache_timeout': timeout
})
return result
return wrapper
return decorator
def invalidate_cache_on_save(model_name: str, cache_patterns: Optional[List[str]] = None):
"""
Decorator to invalidate cache when model instances are saved
Args:
model_name: Name of the model
cache_patterns: List of cache key patterns to invalidate
"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
# Invalidate related cache entries
cache_service = EnhancedCacheService()
# Standard model cache invalidation
instance_id = getattr(self, 'id', None)
cache_service.invalidate_model_cache(model_name, instance_id)
# Custom pattern invalidation
if cache_patterns:
for pattern in cache_patterns:
if instance_id:
pattern = pattern.format(model=model_name, id=instance_id)
cache_service.invalidate_pattern(pattern)
logger.info(f"Invalidated cache for {model_name} after save", extra={
'model': model_name,
'instance_id': instance_id,
'patterns': cache_patterns
})
return result
return wrapper
return decorator
class CachedAPIViewMixin:
"""Mixin to add caching capabilities to API views"""
cache_timeout = 1800 # 30 minutes default
cache_vary_on = ['version']
cache_key_prefix = 'api'
cache_backend = 'api'
@method_decorator(vary_on_headers('User-Agent', 'Accept-Language'))
def dispatch(self, request, *args, **kwargs):
"""Add caching to the dispatch method"""
if request.method == 'GET' and getattr(self, 'enable_caching', True):
return self._cached_dispatch(request, *args, **kwargs)
return super().dispatch(request, *args, **kwargs)
def _cached_dispatch(self, request, *args, **kwargs):
"""Handle cached dispatch for GET requests"""
cache_key = self._generate_cache_key(request, *args, **kwargs)
cache_service = EnhancedCacheService()
cached_response = getattr(cache_service, self.cache_backend + '_cache').get(cache_key)
if cached_response:
logger.debug(f"Cache hit for view {self.__class__.__name__}")
return cached_response
# Execute view
response = super().dispatch(request, *args, **kwargs)
# Cache successful responses
if hasattr(response, 'status_code') and response.status_code == 200:
# Render DRF responses so they can be safely pickled into the cache
if hasattr(response, 'render') and callable(response.render):
response.render()
getattr(cache_service, self.cache_backend + '_cache').set(
cache_key, response, self.cache_timeout
)
logger.debug(f"Cached response for view {self.__class__.__name__}")
return response
def _generate_cache_key(self, request, *args, **kwargs):
"""Generate cache key for the request"""
key_parts = [
self.cache_key_prefix,
self.__class__.__name__,
request.method,
str(request.user.id) if request.user.is_authenticated else 'anonymous',
str(hash(frozenset(request.GET.items()))),
]
if args:
key_parts.append(str(hash(args)))
if kwargs:
key_parts.append(str(hash(frozenset(kwargs.items()))))
# Add vary_on fields
for field in self.cache_vary_on:
value = getattr(request, field, '')
key_parts.append(str(value))
return ':'.join(key_parts)
def smart_cache(
timeout: int = 3600,
key_func: Optional[Callable] = None,
invalidate_on: Optional[List[str]] = None,
cache_backend: str = 'default'
):
"""
Smart caching decorator that adapts to function arguments
Args:
timeout: Cache timeout in seconds
key_func: Custom function to generate cache key
invalidate_on: List of signals to invalidate cache on
cache_backend: Cache backend to use
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Generate cache key
if key_func:
cache_key = key_func(*args, **kwargs)
else:
# Default key generation
key_data = {
'func': f"{func.__module__}.{func.__name__}",
'args': str(args),
'kwargs': json.dumps(kwargs, sort_keys=True, default=str)
}
key_string = json.dumps(key_data, sort_keys=True)
cache_key = f"smart_cache:{hashlib.md5(key_string.encode()).hexdigest()}"
# Try to get from cache
cache_service = EnhancedCacheService()
cached_result = getattr(cache_service, cache_backend + '_cache').get(cache_key)
if cached_result is not None:
logger.debug(f"Smart cache hit for {func.__name__}")
return cached_result
# Execute function
start_time = time.time()
result = func(*args, **kwargs)
execution_time = time.time() - start_time
# Cache result
getattr(cache_service, cache_backend + '_cache').set(cache_key, result, timeout)
logger.debug(f"Smart cached result for {func.__name__}", extra={
'cache_key': cache_key,
'execution_time': execution_time,
'function': func.__name__
})
return result
# Add cache invalidation if specified
if invalidate_on:
wrapper._cache_invalidate_on = invalidate_on
wrapper._cache_backend = cache_backend
return wrapper
return decorator
def conditional_cache(condition_func: Callable, **cache_kwargs):
"""
Cache decorator that only caches when condition is met
Args:
condition_func: Function that returns True if caching should be applied
**cache_kwargs: Arguments passed to smart_cache
"""
def decorator(func):
cached_func = smart_cache(**cache_kwargs)(func)
@wraps(func)
def wrapper(*args, **kwargs):
if condition_func(*args, **kwargs):
return cached_func(*args, **kwargs)
else:
return func(*args, **kwargs)
return wrapper
return decorator
# Utility functions for cache key generation
def generate_user_cache_key(user, suffix: str = ''):
"""Generate cache key based on user"""
user_id = user.id if user.is_authenticated else 'anonymous'
return f"user:{user_id}:{suffix}" if suffix else f"user:{user_id}"
def generate_model_cache_key(model_instance, suffix: str = ''):
"""Generate cache key based on model instance"""
model_name = model_instance._meta.model_name
instance_id = model_instance.id
return f"{model_name}:{instance_id}:{suffix}" if suffix else f"{model_name}:{instance_id}"
def generate_queryset_cache_key(queryset, params: dict = None):
"""Generate cache key for queryset with parameters"""
model_name = queryset.model._meta.model_name
params_str = json.dumps(params or {}, sort_keys=True, default=str)
params_hash = hashlib.md5(params_str.encode()).hexdigest()
return f"queryset:{model_name}:{params_hash}"

213
core/exceptions.py Normal file

@@ -0,0 +1,213 @@
"""
Custom exception classes for ThrillWiki.
Provides domain-specific exceptions with proper error codes and messages.
"""
from typing import Optional, Dict, Any
class ThrillWikiException(Exception):
"""Base exception for all ThrillWiki-specific errors."""
default_message = "An error occurred"
error_code = "THRILLWIKI_ERROR"
status_code = 500
def __init__(
self,
message: Optional[str] = None,
error_code: Optional[str] = None,
details: Optional[Dict[str, Any]] = None
):
self.message = message or self.default_message
self.error_code = error_code or self.error_code
self.details = details or {}
super().__init__(self.message)
def to_dict(self) -> Dict[str, Any]:
"""Convert exception to dictionary for API responses."""
return {
'error_code': self.error_code,
'message': self.message,
'details': self.details
}
class ValidationException(ThrillWikiException):
"""Raised when data validation fails."""
default_message = "Validation failed"
error_code = "VALIDATION_ERROR"
status_code = 400
class NotFoundError(ThrillWikiException):
"""Raised when a requested resource is not found."""
default_message = "Resource not found"
error_code = "NOT_FOUND"
status_code = 404
class PermissionDeniedError(ThrillWikiException):
"""Raised when user lacks permission for an operation."""
default_message = "Permission denied"
error_code = "PERMISSION_DENIED"
status_code = 403
class BusinessLogicError(ThrillWikiException):
"""Raised when business logic constraints are violated."""
default_message = "Business logic violation"
error_code = "BUSINESS_LOGIC_ERROR"
status_code = 400
class ExternalServiceError(ThrillWikiException):
"""Raised when external service calls fail."""
default_message = "External service error"
error_code = "EXTERNAL_SERVICE_ERROR"
status_code = 502
# Domain-specific exceptions
class ParkError(ThrillWikiException):
"""Base exception for park-related errors."""
error_code = "PARK_ERROR"
class ParkNotFoundError(NotFoundError):
"""Raised when a park is not found."""
default_message = "Park not found"
error_code = "PARK_NOT_FOUND"
def __init__(self, park_slug: Optional[str] = None, **kwargs):
if park_slug:
kwargs['details'] = {'park_slug': park_slug}
kwargs['message'] = f"Park with slug '{park_slug}' not found"
super().__init__(**kwargs)
class ParkOperationError(BusinessLogicError):
"""Raised when park operation constraints are violated."""
default_message = "Invalid park operation"
error_code = "PARK_OPERATION_ERROR"
class RideError(ThrillWikiException):
"""Base exception for ride-related errors."""
error_code = "RIDE_ERROR"
class RideNotFoundError(NotFoundError):
"""Raised when a ride is not found."""
default_message = "Ride not found"
error_code = "RIDE_NOT_FOUND"
def __init__(self, ride_slug: Optional[str] = None, **kwargs):
if ride_slug:
kwargs['details'] = {'ride_slug': ride_slug}
kwargs['message'] = f"Ride with slug '{ride_slug}' not found"
super().__init__(**kwargs)
class RideOperationError(BusinessLogicError):
"""Raised when ride operation constraints are violated."""
default_message = "Invalid ride operation"
error_code = "RIDE_OPERATION_ERROR"
class LocationError(ThrillWikiException):
"""Base exception for location-related errors."""
error_code = "LOCATION_ERROR"
class InvalidCoordinatesError(ValidationException):
"""Raised when geographic coordinates are invalid."""
default_message = "Invalid geographic coordinates"
error_code = "INVALID_COORDINATES"
def __init__(self, latitude: Optional[float] = None, longitude: Optional[float] = None, **kwargs):
if latitude is not None or longitude is not None:
kwargs['details'] = {'latitude': latitude, 'longitude': longitude}
super().__init__(**kwargs)
class GeolocationError(ExternalServiceError):
"""Raised when geolocation services fail."""
default_message = "Geolocation service unavailable"
error_code = "GEOLOCATION_ERROR"
class ReviewError(ThrillWikiException):
"""Base exception for review-related errors."""
error_code = "REVIEW_ERROR"
class ReviewModerationError(BusinessLogicError):
"""Raised when review moderation constraints are violated."""
default_message = "Review moderation error"
error_code = "REVIEW_MODERATION_ERROR"
class DuplicateReviewError(BusinessLogicError):
"""Raised when user tries to create duplicate reviews."""
default_message = "User has already reviewed this item"
error_code = "DUPLICATE_REVIEW"
class AccountError(ThrillWikiException):
"""Base exception for account-related errors."""
error_code = "ACCOUNT_ERROR"
class InsufficientPermissionsError(PermissionDeniedError):
"""Raised when user lacks required permissions."""
default_message = "Insufficient permissions"
error_code = "INSUFFICIENT_PERMISSIONS"
def __init__(self, required_permission: Optional[str] = None, **kwargs):
if required_permission:
kwargs['details'] = {'required_permission': required_permission}
kwargs['message'] = f"Permission '{required_permission}' required"
super().__init__(**kwargs)
class EmailError(ExternalServiceError):
"""Raised when email operations fail."""
default_message = "Email service error"
error_code = "EMAIL_ERROR"
class CacheError(ThrillWikiException):
"""Raised when cache operations fail."""
default_message = "Cache operation failed"
error_code = "CACHE_ERROR"
status_code = 500
class RoadTripError(ExternalServiceError):
"""Raised when road trip planning fails."""
default_message = "Road trip planning error"
error_code = "ROADTRIP_ERROR"
def __init__(self, service_name: Optional[str] = None, **kwargs):
if service_name:
kwargs['details'] = {'service': service_name}
super().__init__(**kwargs)
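A typical service-layer usage of these exceptions, sketched with a hypothetical `park_get` selector; the `custom_exception_handler` above turns the raise into a 404 JSON envelope:

```python
from core.exceptions import ParkNotFoundError
from parks.models import Park  # Park fields are assumptions for this sketch


def park_get(*, slug: str) -> Park:
    try:
        return Park.objects.get(slug=slug)
    except Park.DoesNotExist:
        # Carries the slug into the standardized error details.
        raise ParkNotFoundError(park_slug=slug)
```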


@@ -0,0 +1 @@
# Health checks module


@@ -0,0 +1,275 @@
"""
Custom health checks for ThrillWiki application.
"""
import time
import logging
from django.core.cache import cache
from django.db import connection
from health_check.backends import BaseHealthCheckBackend
from health_check.exceptions import ServiceUnavailable, ServiceReturnedUnexpectedResult
logger = logging.getLogger(__name__)
class CacheHealthCheck(BaseHealthCheckBackend):
"""Check Redis cache connectivity and performance"""
critical_service = True
def check_status(self):
try:
# Test cache write/read performance
test_key = 'health_check_test'
test_value = 'test_value_' + str(int(time.time()))
start_time = time.time()
cache.set(test_key, test_value, timeout=30)
cached_value = cache.get(test_key)
cache_time = time.time() - start_time
if cached_value != test_value:
self.add_error("Cache read/write test failed - values don't match")
return
# Check cache performance
if cache_time > 0.1: # Warn if cache operations take more than 100ms
self.add_error(f"Cache performance degraded: {cache_time:.3f}s for read/write operation")
return
# Clean up test key
cache.delete(test_key)
# Additional Redis-specific checks if using django-redis
try:
from django_redis import get_redis_connection
redis_client = get_redis_connection("default")
info = redis_client.info()
# Check memory usage
used_memory = info.get('used_memory', 0)
max_memory = info.get('maxmemory', 0)
if max_memory > 0:
memory_usage_percent = (used_memory / max_memory) * 100
if memory_usage_percent > 90:
self.add_error(f"Redis memory usage critical: {memory_usage_percent:.1f}%")
elif memory_usage_percent > 80:
logger.warning(f"Redis memory usage high: {memory_usage_percent:.1f}%")
except ImportError:
# django-redis not available, skip additional checks
pass
except Exception as e:
logger.warning(f"Could not get Redis info: {e}")
except Exception as e:
self.add_error(f"Cache service unavailable: {e}")
class DatabasePerformanceCheck(BaseHealthCheckBackend):
"""Check database performance and connectivity"""
critical_service = False
def check_status(self):
try:
start_time = time.time()
# Test basic connectivity
with connection.cursor() as cursor:
cursor.execute("SELECT 1")
result = cursor.fetchone()
if result[0] != 1:
self.add_error("Database connectivity test failed")
return
basic_query_time = time.time() - start_time
# Test a more complex query (if it takes too long, there might be performance issues)
start_time = time.time()
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM django_content_type")
cursor.fetchone()
complex_query_time = time.time() - start_time
# Performance thresholds
if basic_query_time > 1.0:
self.add_error(f"Database responding slowly: basic query took {basic_query_time:.2f}s")
elif basic_query_time > 0.5:
logger.warning(f"Database performance degraded: basic query took {basic_query_time:.2f}s")
if complex_query_time > 2.0:
self.add_error(f"Database performance critical: complex query took {complex_query_time:.2f}s")
elif complex_query_time > 1.0:
logger.warning(f"Database performance slow: complex query took {complex_query_time:.2f}s")
# Check database version and settings if possible
try:
with connection.cursor() as cursor:
cursor.execute("SELECT version()")
version = cursor.fetchone()[0]
logger.debug(f"Database version: {version}")
except Exception as e:
logger.debug(f"Could not get database version: {e}")
except Exception as e:
self.add_error(f"Database performance check failed: {e}")
class ApplicationHealthCheck(BaseHealthCheckBackend):
"""Check application-specific health indicators"""
critical_service = False
def check_status(self):
try:
# Check if we can import critical modules
critical_modules = [
'parks.models',
'rides.models',
'accounts.models',
'core.services',
]
for module_name in critical_modules:
try:
__import__(module_name)
except ImportError as e:
self.add_error(f"Critical module import failed: {module_name} - {e}")
# Check if we can access critical models
try:
from parks.models import Park
from rides.models import Ride
from django.contrib.auth import get_user_model
User = get_user_model()
# Test that we can query these models (just count, don't load data)
park_count = Park.objects.count()
ride_count = Ride.objects.count()
user_count = User.objects.count()
logger.debug(f"Model counts - Parks: {park_count}, Rides: {ride_count}, Users: {user_count}")
except Exception as e:
self.add_error(f"Model access check failed: {e}")
# Check media and static file configuration
from django.conf import settings
import os
if not os.path.exists(settings.MEDIA_ROOT):
self.add_error(f"Media directory does not exist: {settings.MEDIA_ROOT}")
if not settings.DEBUG and (not settings.STATIC_ROOT or not os.path.exists(settings.STATIC_ROOT)):
self.add_error(f"Static directory does not exist: {settings.STATIC_ROOT}")
except Exception as e:
self.add_error(f"Application health check failed: {e}")
class ExternalServiceHealthCheck(BaseHealthCheckBackend):
"""Check external services and dependencies"""
critical_service = False
def check_status(self):
# Check email service if configured
try:
from django.core.mail import get_connection
from django.conf import settings
if hasattr(settings, 'EMAIL_BACKEND') and 'console' not in settings.EMAIL_BACKEND:
# Only check if not using console backend
connection = get_connection()
if hasattr(connection, 'open'):
try:
connection.open()
connection.close()
except Exception as e:
logger.warning(f"Email service check failed: {e}")
# Don't fail the health check for email issues in development
except Exception as e:
logger.debug(f"Email service check error: {e}")
# Check if Sentry is configured and working
try:
import sentry_sdk
if sentry_sdk.Hub.current.client:
# Sentry is configured
try:
# Test that we can capture a test message (this won't actually send to Sentry)
with sentry_sdk.push_scope() as scope:
scope.set_tag("health_check", True)
# Don't actually send a message, just verify the SDK is working
logger.debug("Sentry SDK is operational")
except Exception as e:
logger.warning(f"Sentry SDK check failed: {e}")
except ImportError:
logger.debug("Sentry SDK not installed")
except Exception as e:
logger.debug(f"Sentry check error: {e}")
# Check Redis connection if configured
try:
from django.core.cache import caches
from django.conf import settings
cache_config = settings.CACHES.get('default', {})
if 'redis' in cache_config.get('BACKEND', '').lower():
# Redis is configured, test basic connectivity
redis_cache = caches['default']
redis_cache.set('health_check_redis', 'test', 10)
value = redis_cache.get('health_check_redis')
if value != 'test':
self.add_error("Redis cache connectivity test failed")
else:
redis_cache.delete('health_check_redis')
except Exception as e:
logger.warning(f"Redis connectivity check failed: {e}")
class DiskSpaceHealthCheck(BaseHealthCheckBackend):
"""Check available disk space"""
critical_service = False
def check_status(self):
try:
import shutil
from django.conf import settings
# Check disk space for media directory
media_usage = shutil.disk_usage(settings.MEDIA_ROOT)
media_free_percent = (media_usage.free / media_usage.total) * 100
# Check disk space for logs directory if it exists
from pathlib import Path
logs_dir = Path(getattr(settings, 'BASE_DIR', '/tmp')) / 'logs'
if logs_dir.exists():
logs_usage = shutil.disk_usage(logs_dir)
logs_free_percent = (logs_usage.free / logs_usage.total) * 100
else:
logs_free_percent = media_free_percent # Use same as media
# Alert thresholds
if media_free_percent < 10:
self.add_error(f"Critical disk space: {media_free_percent:.1f}% free in media directory")
elif media_free_percent < 20:
logger.warning(f"Low disk space: {media_free_percent:.1f}% free in media directory")
if logs_free_percent < 10:
self.add_error(f"Critical disk space: {logs_free_percent:.1f}% free in logs directory")
elif logs_free_percent < 20:
logger.warning(f"Low disk space: {logs_free_percent:.1f}% free in logs directory")
except Exception as e:
logger.warning(f"Disk space check failed: {e}")
# Don't fail health check for disk space issues in development
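These backends only run once registered with django-health-check's plugin directory, typically in an `AppConfig.ready()`; a sketch (the import path for the checks module is an assumption, since the file name is not shown in this diff):

```python
# core/apps.py (sketch)
from django.apps import AppConfig


class CoreConfig(AppConfig):
    name = "core"

    def ready(self):
        from health_check.plugins import plugin_dir
        # Module path below is an assumption for this sketch.
        from core.health_checks.checks import CacheHealthCheck, DatabasePerformanceCheck

        plugin_dir.register(CacheHealthCheck)
        plugin_dir.register(DatabasePerformanceCheck)
```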

233
core/logging.py Normal file

@@ -0,0 +1,233 @@
"""
Centralized logging configuration for ThrillWiki.
Provides structured logging with proper formatting and context.
"""
import logging
import sys
from typing import Dict, Any, Optional
from django.conf import settings
from django.utils import timezone
class ThrillWikiFormatter(logging.Formatter):
"""Custom formatter for ThrillWiki logs with structured output."""
def format(self, record):
# Add timestamp if not present
if not hasattr(record, 'timestamp'):
record.timestamp = timezone.now().isoformat()
# Add request context if available
if hasattr(record, 'request'):
record.request_id = getattr(record.request, 'id', 'unknown')
record.user_id = getattr(record.request.user, 'id', 'anonymous') if hasattr(record.request, 'user') else 'unknown'
record.path = getattr(record.request, 'path', 'unknown')
record.method = getattr(record.request, 'method', 'unknown')
# Structure the log message
if hasattr(record, 'extra_data'):
record.structured_data = record.extra_data
return super().format(record)
def get_logger(name: str) -> logging.Logger:
"""
Get a configured logger for ThrillWiki components.
Args:
name: Logger name (usually __name__)
Returns:
Configured logger instance
"""
logger = logging.getLogger(name)
# Only configure if not already configured
if not logger.handlers:
handler = logging.StreamHandler(sys.stdout)
formatter = ThrillWikiFormatter(
fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG if settings.DEBUG else logging.WARNING)
return logger
def log_exception(
logger: logging.Logger,
exception: Exception,
*,
context: Optional[Dict[str, Any]] = None,
request=None,
level: int = logging.ERROR
) -> None:
"""
Log an exception with structured context.
Args:
logger: Logger instance
exception: Exception to log
context: Additional context data
request: Django request object
level: Log level
"""
log_data = {
'exception_type': exception.__class__.__name__,
'exception_message': str(exception),
'context': context or {}
}
if request:
log_data.update({
'request_path': getattr(request, 'path', 'unknown'),
'request_method': getattr(request, 'method', 'unknown'),
'user_id': getattr(request.user, 'id', 'anonymous') if hasattr(request, 'user') else 'unknown'
})
logger.log(level, f"Exception occurred: {exception}", extra={'extra_data': log_data}, exc_info=True)
def log_business_event(
logger: logging.Logger,
event_type: str,
*,
message: str,
context: Optional[Dict[str, Any]] = None,
request=None,
level: int = logging.INFO
) -> None:
"""
Log a business event with structured context.
Args:
logger: Logger instance
event_type: Type of business event
message: Event message
context: Additional context data
request: Django request object
level: Log level
"""
log_data = {
'event_type': event_type,
'context': context or {}
}
if request:
log_data.update({
'request_path': getattr(request, 'path', 'unknown'),
'request_method': getattr(request, 'method', 'unknown'),
'user_id': getattr(request.user, 'id', 'anonymous') if hasattr(request, 'user') else 'unknown'
})
logger.log(level, message, extra={'extra_data': log_data})
def log_performance_metric(
logger: logging.Logger,
operation: str,
*,
duration_ms: float,
context: Optional[Dict[str, Any]] = None,
level: int = logging.INFO
) -> None:
"""
Log a performance metric.
Args:
logger: Logger instance
operation: Operation name
duration_ms: Duration in milliseconds
context: Additional context data
level: Log level
"""
log_data = {
'metric_type': 'performance',
'operation': operation,
'duration_ms': duration_ms,
'context': context or {}
}
message = f"Performance: {operation} took {duration_ms:.2f}ms"
logger.log(level, message, extra={'extra_data': log_data})
def log_api_request(
logger: logging.Logger,
request,
*,
response_status: Optional[int] = None,
duration_ms: Optional[float] = None,
level: int = logging.INFO
) -> None:
"""
Log an API request with context.
Args:
logger: Logger instance
request: Django request object
response_status: HTTP response status code
duration_ms: Request duration in milliseconds
level: Log level
"""
log_data = {
'request_type': 'api',
'path': getattr(request, 'path', 'unknown'),
'method': getattr(request, 'method', 'unknown'),
'user_id': getattr(request.user, 'id', 'anonymous') if hasattr(request, 'user') else 'unknown',
'response_status': response_status,
'duration_ms': duration_ms
}
message = f"API Request: {request.method} {request.path}"
if response_status:
message += f" -> {response_status}"
if duration_ms:
message += f" ({duration_ms:.2f}ms)"
logger.log(level, message, extra={'extra_data': log_data})
def log_security_event(
logger: logging.Logger,
event_type: str,
*,
message: str,
severity: str = 'medium',
context: Optional[Dict[str, Any]] = None,
request=None
) -> None:
"""
Log a security-related event.
Args:
logger: Logger instance
event_type: Type of security event
message: Event message
severity: Event severity (low, medium, high, critical)
context: Additional context data
request: Django request object
"""
log_data = {
'security_event': True,
'event_type': event_type,
'severity': severity,
'context': context or {}
}
if request:
log_data.update({
'request_path': getattr(request, 'path', 'unknown'),
'request_method': getattr(request, 'method', 'unknown'),
'user_id': getattr(request.user, 'id', 'anonymous') if hasattr(request, 'user') else 'unknown',
'remote_addr': request.META.get('REMOTE_ADDR', 'unknown'),
'user_agent': request.META.get('HTTP_USER_AGENT', 'unknown')
})
# Use WARNING for medium/high, ERROR for critical
level = logging.ERROR if severity in ['high', 'critical'] else logging.WARNING
logger.log(level, f"SECURITY: {message}", extra={'extra_data': log_data})

263
core/managers.py Normal file

@@ -0,0 +1,263 @@
"""
Custom managers and QuerySets for optimized database patterns.
Following Django styleguide best practices for database access.
"""
from typing import Optional, List, Dict, Any, Union
from django.db import models
from django.db.models import Q, F, Count, Avg, Max, Min, Sum, Prefetch
from django.contrib.gis.db.models.functions import Distance as DistanceFunc
from django.contrib.gis.geos import Point, Polygon
from django.contrib.gis.measure import Distance
from django.utils import timezone
from datetime import timedelta
class BaseQuerySet(models.QuerySet):
"""Base QuerySet with common optimizations and patterns."""
def active(self):
"""Filter for active/enabled records."""
if hasattr(self.model, 'is_active'):
return self.filter(is_active=True)
return self
def published(self):
"""Filter for published records."""
if hasattr(self.model, 'is_published'):
return self.filter(is_published=True)
return self
def recent(self, *, days: int = 30):
"""Filter for recently created records."""
cutoff_date = timezone.now() - timedelta(days=days)
return self.filter(created_at__gte=cutoff_date)
def search(self, *, query: str, fields: Optional[List[str]] = None):
"""
Full-text search across specified fields.
Args:
query: Search query string
fields: List of field names to search (defaults to name, description)
"""
if not query:
return self
if fields is None:
fields = ['name', 'description'] if hasattr(self.model, 'name') else []
q_objects = Q()
for field in fields:
if hasattr(self.model, field):
q_objects |= Q(**{f"{field}__icontains": query})
return self.filter(q_objects) if q_objects else self
def with_stats(self):
"""Add basic statistics annotations."""
return self
def optimized_for_list(self):
"""Optimize queryset for list display."""
return self.select_related().prefetch_related()
def optimized_for_detail(self):
"""Optimize queryset for detail display."""
return self.select_related().prefetch_related()
class BaseManager(models.Manager):
"""Base manager with common patterns."""
def get_queryset(self):
return BaseQuerySet(self.model, using=self._db)
def active(self):
return self.get_queryset().active()
def published(self):
return self.get_queryset().published()
def recent(self, *, days: int = 30):
return self.get_queryset().recent(days=days)
def search(self, *, query: str, fields: Optional[List[str]] = None):
return self.get_queryset().search(query=query, fields=fields)
class LocationQuerySet(BaseQuerySet):
"""QuerySet for location-based models with geographic functionality."""
def near_point(self, *, point: Point, distance_km: float = 50):
"""Filter locations near a geographic point, closest first."""
if hasattr(self.model, 'point'):
# QuerySet.distance() was removed in Django 2.0; annotate instead
return self.filter(
point__distance_lte=(point, Distance(km=distance_km))
).annotate(distance=DistanceFunc('point', point)).order_by('distance')
return self
def within_bounds(self, *, north: float, south: float, east: float, west: float):
"""Filter locations within a geographic bounding box."""
if hasattr(self.model, 'point'):
# PointField has no latitude/longitude lookups; use a bbox polygon
bbox = Polygon.from_bbox((west, south, east, north))
return self.filter(point__within=bbox)
return self
def by_country(self, *, country: str):
"""Filter by country."""
if hasattr(self.model, 'country'):
return self.filter(country__iexact=country)
return self
def by_region(self, *, state: str):
"""Filter by state/region."""
if hasattr(self.model, 'state'):
return self.filter(state__iexact=state)
return self
def by_city(self, *, city: str):
"""Filter by city."""
if hasattr(self.model, 'city'):
return self.filter(city__iexact=city)
return self
class LocationManager(BaseManager):
"""Manager for location-based models."""
def get_queryset(self):
return LocationQuerySet(self.model, using=self._db)
def near_point(self, *, point: Point, distance_km: float = 50):
return self.get_queryset().near_point(point=point, distance_km=distance_km)
def within_bounds(self, *, north: float, south: float, east: float, west: float):
return self.get_queryset().within_bounds(north=north, south=south, east=east, west=west)
class ReviewableQuerySet(BaseQuerySet):
"""QuerySet for models that can be reviewed."""
def with_review_stats(self):
"""Add review statistics annotations."""
return self.annotate(
review_count=Count('reviews', filter=Q(reviews__is_published=True)),
average_rating=Avg('reviews__rating', filter=Q(reviews__is_published=True)),
latest_review_date=Max('reviews__created_at', filter=Q(reviews__is_published=True))
)
def highly_rated(self, *, min_rating: float = 8.0):
"""Filter for highly rated items."""
return self.with_review_stats().filter(average_rating__gte=min_rating)
def recently_reviewed(self, *, days: int = 30):
"""Filter for items with recent reviews."""
cutoff_date = timezone.now() - timedelta(days=days)
return self.filter(reviews__created_at__gte=cutoff_date, reviews__is_published=True).distinct()
class ReviewableManager(BaseManager):
"""Manager for reviewable models."""
def get_queryset(self):
return ReviewableQuerySet(self.model, using=self._db)
def with_review_stats(self):
return self.get_queryset().with_review_stats()
def highly_rated(self, *, min_rating: float = 8.0):
return self.get_queryset().highly_rated(min_rating=min_rating)
class HierarchicalQuerySet(BaseQuerySet):
"""QuerySet for hierarchical models (with parent/child relationships)."""
def root_level(self):
"""Filter for root-level items (no parent)."""
if hasattr(self.model, 'parent'):
return self.filter(parent__isnull=True)
return self
def children_of(self, *, parent_id: int):
"""Get children of a specific parent."""
if hasattr(self.model, 'parent'):
return self.filter(parent_id=parent_id)
return self
def with_children_count(self):
"""Add count of children."""
if hasattr(self.model, 'children'):
return self.annotate(children_count=Count('children'))
return self
class HierarchicalManager(BaseManager):
"""Manager for hierarchical models."""
def get_queryset(self):
return HierarchicalQuerySet(self.model, using=self._db)
def root_level(self):
return self.get_queryset().root_level()
class TimestampedQuerySet(BaseQuerySet):
"""QuerySet for models with created_at/updated_at timestamps."""
def created_between(self, *, start_date, end_date):
"""Filter by creation date range."""
return self.filter(created_at__date__range=[start_date, end_date])
def updated_since(self, *, since_date):
"""Filter for records updated since a date."""
return self.filter(updated_at__gte=since_date)
def by_creation_date(self, *, descending: bool = True):
"""Order by creation date."""
order = '-created_at' if descending else 'created_at'
return self.order_by(order)
class TimestampedManager(BaseManager):
"""Manager for timestamped models."""
def get_queryset(self):
return TimestampedQuerySet(self.model, using=self._db)
def created_between(self, *, start_date, end_date):
return self.get_queryset().created_between(start_date=start_date, end_date=end_date)
class StatusQuerySet(BaseQuerySet):
"""QuerySet for models with status fields."""
def with_status(self, *, status: Union[str, List[str]]):
"""Filter by status."""
if isinstance(status, list):
return self.filter(status__in=status)
return self.filter(status=status)
def operating(self):
"""Filter for operating/active status."""
return self.filter(status='OPERATING')
def closed(self):
"""Filter for closed status."""
return self.filter(status__in=['CLOSED_TEMP', 'CLOSED_PERM'])
class StatusManager(BaseManager):
"""Manager for status-based models."""
def get_queryset(self):
return StatusQuerySet(self.model, using=self._db)
def operating(self):
return self.get_queryset().operating()
def closed(self):
return self.get_queryset().closed()
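To show how these attach to a model, a hedged sketch; the Park fields here are assumptions for illustration only:

```python
from django.contrib.gis.db import models as gis_models
from django.db import models

from core.managers import LocationManager


class Park(models.Model):  # hypothetical fields, for illustration only
    name = models.CharField(max_length=255)
    country = models.CharField(max_length=100, blank=True)
    point = gis_models.PointField(null=True, blank=True)
    is_active = models.BooleanField(default=True)

    objects = LocationManager()


# Manager entry points return chainable QuerySets, e.g.:
# Park.objects.active().by_country(country="USA").search(query="cedar")
```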


@@ -0,0 +1,22 @@
# Core middleware modules
# Import middleware classes from the analytics module
from .analytics import PageViewMiddleware, PgHistoryContextMiddleware
# Import middleware classes from the performance_middleware.py module
from .performance_middleware import (
PerformanceMiddleware,
QueryCountMiddleware,
DatabaseConnectionMiddleware,
CachePerformanceMiddleware
)
# Make all middleware classes available at the package level
__all__ = [
'PageViewMiddleware',
'PgHistoryContextMiddleware',
'PerformanceMiddleware',
'QueryCountMiddleware',
'DatabaseConnectionMiddleware',
'CachePerformanceMiddleware'
]
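These re-exports let settings reference the package directly; a sketch of the relevant MIDDLEWARE entries (ordering relative to other middleware is an assumption):

```python
# settings.py (sketch)
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    # ... other stock middleware ...
    "core.middleware.PerformanceMiddleware",
    "core.middleware.QueryCountMiddleware",
    "core.middleware.PageViewMiddleware",
]
```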


@@ -1,3 +1,7 @@
"""
Analytics and tracking middleware for Django application.
"""
import pghistory
from django.contrib.auth.models import AnonymousUser
from django.core.handlers.wsgi import WSGIRequest
@@ -6,6 +10,7 @@ from django.contrib.contenttypes.models import ContentType
from django.views.generic.detail import DetailView
from core.analytics import PageView
class RequestContextProvider(pghistory.context):
"""Custom context provider for pghistory that extracts information from the request."""
def __call__(self, request: WSGIRequest) -> dict:
@@ -16,9 +21,11 @@ class RequestContextProvider(pghistory.context):
'session_key': request.session.session_key if hasattr(request, 'session') else None
}
# Initialize the context provider
request_context = RequestContextProvider()
class PgHistoryContextMiddleware:
"""
Middleware that ensures request object is available to pghistory context.
@@ -30,7 +37,10 @@ class PgHistoryContextMiddleware:
response = self.get_response(request)
return response
class PageViewMiddleware(MiddlewareMixin):
"""Middleware to track page views for DetailView-based pages."""
def process_view(self, request, view_func, view_args, view_kwargs):
# Only track GET requests
if request.method != 'GET':
@@ -63,4 +73,4 @@ class PageViewMiddleware(MiddlewareMixin):
# Fail silently to not interrupt the request
pass
return None
return None


@@ -0,0 +1,268 @@
"""
Performance monitoring middleware for tracking request metrics.
"""
import time
import logging
from django.db import connection
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
performance_logger = logging.getLogger('performance')
logger = logging.getLogger(__name__)
class PerformanceMiddleware(MiddlewareMixin):
"""Middleware to collect performance metrics for each request"""
def process_request(self, request):
"""Initialize performance tracking for the request"""
request._performance_start_time = time.time()
request._performance_initial_queries = len(connection.queries) if hasattr(connection, 'queries') else 0
return None
def process_response(self, request, response):
"""Log performance metrics after response is ready"""
# Skip performance tracking for certain paths
skip_paths = ['/health/', '/admin/jsi18n/', '/static/', '/media/', '/__debug__/']
if any(request.path.startswith(path) for path in skip_paths):
return response
# Calculate metrics
end_time = time.time()
start_time = getattr(request, '_performance_start_time', end_time)
duration = end_time - start_time
initial_queries = getattr(request, '_performance_initial_queries', 0)
total_queries = len(connection.queries) - initial_queries if hasattr(connection, 'queries') else 0
# Get content length
content_length = 0
if hasattr(response, 'content'):
content_length = len(response.content)
elif hasattr(response, 'streaming_content'):
# For streaming responses, we can't easily measure content length
content_length = -1
# Build performance data
performance_data = {
'path': request.path,
'method': request.method,
'status_code': response.status_code,
'duration_ms': round(duration * 1000, 2),
'duration_seconds': round(duration, 3),
'query_count': total_queries,
'content_length_bytes': content_length,
'user_id': getattr(request.user, 'id', None) if hasattr(request, 'user') and request.user.is_authenticated else None,
'user_agent': request.META.get('HTTP_USER_AGENT', '')[:100], # Truncate user agent
'remote_addr': self._get_client_ip(request),
}
# Add query details in debug mode
if settings.DEBUG and hasattr(connection, 'queries') and total_queries > 0:
recent_queries = connection.queries[-total_queries:]
performance_data['queries'] = [
{
'sql': query['sql'][:200] + '...' if len(query['sql']) > 200 else query['sql'],
'time': float(query['time'])
}
for query in recent_queries[-10:] # Last 10 queries only
]
# Identify slow queries
slow_queries = [q for q in recent_queries if float(q['time']) > 0.1]
if slow_queries:
performance_data['slow_query_count'] = len(slow_queries)
performance_data['slowest_query_time'] = max(float(q['time']) for q in slow_queries)
# Determine log level based on performance
log_level = self._get_log_level(duration, total_queries, response.status_code)
# Log the performance data
performance_logger.log(
log_level,
f"Request performance: {request.method} {request.path} - "
f"{duration:.3f}s, {total_queries} queries, {response.status_code}",
extra=performance_data
)
# Add performance headers for debugging (only in debug mode)
if settings.DEBUG:
response['X-Response-Time'] = f"{duration * 1000:.2f}ms"
response['X-Query-Count'] = str(total_queries)
if total_queries > 0 and hasattr(connection, 'queries'):
total_query_time = sum(float(q['time']) for q in connection.queries[-total_queries:])
response['X-Query-Time'] = f"{total_query_time * 1000:.2f}ms"
return response
def process_exception(self, request, exception):
"""Log performance data even when an exception occurs"""
end_time = time.time()
start_time = getattr(request, '_performance_start_time', end_time)
duration = end_time - start_time
initial_queries = getattr(request, '_performance_initial_queries', 0)
total_queries = len(connection.queries) - initial_queries if hasattr(connection, 'queries') else 0
performance_data = {
'path': request.path,
'method': request.method,
'status_code': 500, # Exception occurred
'duration_ms': round(duration * 1000, 2),
'query_count': total_queries,
'exception': str(exception),
'exception_type': type(exception).__name__,
'user_id': getattr(request.user, 'id', None) if hasattr(request, 'user') and request.user.is_authenticated else None,
}
performance_logger.error(
f"Request exception: {request.method} {request.path} - "
f"{duration:.3f}s, {total_queries} queries, {type(exception).__name__}: {exception}",
extra=performance_data
)
return None # Don't handle the exception, just log it
def _get_client_ip(self, request):
"""Extract client IP address from request"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0].strip()
else:
ip = request.META.get('REMOTE_ADDR', '')
return ip
def _get_log_level(self, duration, query_count, status_code):
"""Determine appropriate log level based on performance metrics"""
# Error responses
if status_code >= 500:
return logging.ERROR
elif status_code >= 400:
return logging.WARNING
# Performance-based log levels
if duration > 5.0: # Very slow requests
return logging.ERROR
elif duration > 2.0 or query_count > 20: # Slow requests or high query count
return logging.WARNING
elif duration > 1.0 or query_count > 10: # Moderately slow
return logging.INFO
else:
return logging.DEBUG
class QueryCountMiddleware(MiddlewareMixin):
"""Middleware to track and limit query counts per request"""
def __init__(self, get_response):
self.get_response = get_response
self.query_limit = getattr(settings, 'MAX_QUERIES_PER_REQUEST', 50)
super().__init__(get_response)
def process_request(self, request):
"""Initialize query tracking"""
request._query_count_start = len(connection.queries) if hasattr(connection, 'queries') else 0
return None
def process_response(self, request, response):
"""Check query count and warn if excessive"""
if not hasattr(connection, 'queries'):
return response
start_count = getattr(request, '_query_count_start', 0)
current_count = len(connection.queries)
request_query_count = current_count - start_count
if request_query_count > self.query_limit:
logger.warning(
f"Excessive query count: {request.path} executed {request_query_count} queries "
f"(limit: {self.query_limit})",
extra={
'path': request.path,
'method': request.method,
'query_count': request_query_count,
'query_limit': self.query_limit,
'excessive_queries': True
}
)
return response
class DatabaseConnectionMiddleware(MiddlewareMixin):
"""Middleware to monitor database connection health"""
def process_request(self, request):
"""Check database connection at start of request"""
try:
# Simple connection test
from django.db import connection
with connection.cursor() as cursor:
cursor.execute("SELECT 1")
cursor.fetchone()
except Exception as e:
logger.error(
f"Database connection failed at request start: {e}",
extra={
'path': request.path,
'method': request.method,
'database_error': str(e)
}
)
# Don't block the request, let Django handle the database error
return None
def process_response(self, request, response):
"""Close database connections properly"""
try:
from django.db import connection
connection.close()
except Exception as e:
logger.warning(f"Error closing database connection: {e}")
return response
class CachePerformanceMiddleware(MiddlewareMixin):
"""Middleware to monitor cache performance"""
def process_request(self, request):
"""Initialize cache performance tracking"""
request._cache_hits = 0
request._cache_misses = 0
request._cache_start_time = time.time()
return None
def process_response(self, request, response):
"""Log cache performance metrics"""
cache_duration = time.time() - getattr(request, '_cache_start_time', time.time())
cache_hits = getattr(request, '_cache_hits', 0)
cache_misses = getattr(request, '_cache_misses', 0)
if cache_hits + cache_misses > 0:
hit_rate = (cache_hits / (cache_hits + cache_misses)) * 100
cache_data = {
'path': request.path,
'cache_hits': cache_hits,
'cache_misses': cache_misses,
'cache_hit_rate': round(hit_rate, 2),
'cache_operations': cache_hits + cache_misses,
'cache_duration': round(cache_duration * 1000, 2) # milliseconds
}
# Log cache performance
if hit_rate < 50 and cache_hits + cache_misses > 5:
logger.warning(
f"Low cache hit rate for {request.path}: {hit_rate:.1f}%",
extra=cache_data
)
else:
logger.debug(
f"Cache performance for {request.path}: {hit_rate:.1f}% hit rate",
extra=cache_data
)
return response
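Taken together, these middlewares are opt-in via settings. A minimal wiring sketch, assuming the module lives at core.middleware.performance (the dotted path is not shown in this diff) and listing only the classes defined in this file:

# settings.py (sketch; the middleware module path is an assumption)
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # Performance tracking should sit high in the stack so its timers
    # wrap as much of the request/response cycle as possible
    'core.middleware.performance.QueryCountMiddleware',
    'core.middleware.performance.DatabaseConnectionMiddleware',
    'core.middleware.performance.CachePerformanceMiddleware',
    # ... the rest of the standard Django middleware ...
]

# Consumed by QueryCountMiddleware; defaults to 50 when unset
MAX_QUERIES_PER_REQUEST = 50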

299
core/selectors.py Normal file
View File

@@ -0,0 +1,299 @@
"""
Selectors for core functionality including map services and analytics.
Following Django styleguide pattern for separating data access from business logic.
"""
from typing import Optional, Dict, Any, List, Union
from django.db.models import QuerySet, Q, F, Count, Avg, Max
from django.contrib.gis.geos import Point, Polygon
from django.contrib.gis.measure import Distance
from django.contrib.gis.db.models.functions import Distance as DistanceFunc
from django.utils import timezone
from datetime import timedelta
from .analytics import PageView
from parks.models import Park
from rides.models import Ride
def unified_locations_for_map(
*,
bounds: Optional[Polygon] = None,
location_types: Optional[List[str]] = None,
filters: Optional[Dict[str, Any]] = None
) -> Dict[str, QuerySet]:
"""
Get unified location data for map display across all location types.
Args:
bounds: Geographic boundary polygon
location_types: List of location types to include ('park', 'ride')
filters: Additional filter parameters
Returns:
Dictionary containing querysets for each location type
"""
results = {}
# Default to all location types if none specified
if not location_types:
location_types = ['park', 'ride']
# Parks
if 'park' in location_types:
park_queryset = Park.objects.select_related(
'operator'
).prefetch_related(
'location'
).annotate(
ride_count_calculated=Count('rides')
)
if bounds:
park_queryset = park_queryset.filter(
location__coordinates__within=bounds
)
if filters:
if 'status' in filters:
park_queryset = park_queryset.filter(status=filters['status'])
if 'operator' in filters:
park_queryset = park_queryset.filter(operator=filters['operator'])
results['parks'] = park_queryset.order_by('name')
# Rides
if 'ride' in location_types:
ride_queryset = Ride.objects.select_related(
'park',
'manufacturer'
).prefetch_related(
'park__location',
'location'
)
if bounds:
ride_queryset = ride_queryset.filter(
Q(location__coordinates__within=bounds) |
Q(park__location__coordinates__within=bounds)
)
if filters:
if 'category' in filters:
ride_queryset = ride_queryset.filter(category=filters['category'])
if 'manufacturer' in filters:
ride_queryset = ride_queryset.filter(manufacturer=filters['manufacturer'])
if 'park' in filters:
ride_queryset = ride_queryset.filter(park=filters['park'])
results['rides'] = ride_queryset.order_by('park__name', 'name')
return results
def locations_near_point(
*,
point: Point,
distance_km: float = 50,
location_types: Optional[List[str]] = None,
limit: int = 20
) -> Dict[str, QuerySet]:
"""
Get locations near a specific geographic point across all types.
Args:
point: Geographic point (longitude, latitude)
distance_km: Maximum distance in kilometers
location_types: List of location types to include
limit: Maximum number of results per type
Returns:
Dictionary containing nearby locations by type
"""
results = {}
if not location_types:
location_types = ['park', 'ride']
# Parks near point
if 'park' in location_types:
        results['parks'] = Park.objects.filter(
            location__coordinates__distance_lte=(point, Distance(km=distance_km))
        ).select_related(
            'operator'
        ).prefetch_related(
            'location'
        ).annotate(
            # QuerySet.distance() was removed in Django 2.0; annotate instead
            distance=DistanceFunc('location__coordinates', point)
        ).order_by('distance')[:limit]
# Rides near point
if 'ride' in location_types:
        results['rides'] = Ride.objects.filter(
            Q(location__coordinates__distance_lte=(point, Distance(km=distance_km))) |
            Q(park__location__coordinates__distance_lte=(point, Distance(km=distance_km)))
        ).select_related(
            'park',
            'manufacturer'
        ).prefetch_related(
            'park__location'
        ).annotate(
            # A ride's own location may be null, so order by the distance to
            # its park, which every ride is guaranteed to have
            distance=DistanceFunc('park__location__coordinates', point)
        ).order_by('distance')[:limit]
return results
def search_all_locations(*, query: str, limit: int = 20) -> Dict[str, QuerySet]:
"""
Search across all location types for a query string.
Args:
query: Search string
limit: Maximum results per type
Returns:
Dictionary containing search results by type
"""
results = {}
# Search parks
results['parks'] = Park.objects.filter(
Q(name__icontains=query) |
Q(description__icontains=query) |
Q(location__city__icontains=query) |
Q(location__region__icontains=query)
).select_related(
'operator'
).prefetch_related(
'location'
).order_by('name')[:limit]
# Search rides
results['rides'] = Ride.objects.filter(
Q(name__icontains=query) |
Q(description__icontains=query) |
Q(park__name__icontains=query) |
Q(manufacturer__name__icontains=query)
).select_related(
'park',
'manufacturer'
).prefetch_related(
'park__location'
).order_by('park__name', 'name')[:limit]
return results
def page_views_for_analytics(
*,
start_date: Optional[timezone.datetime] = None,
end_date: Optional[timezone.datetime] = None,
path_pattern: Optional[str] = None
) -> QuerySet[PageView]:
"""
Get page views for analytics with optional filtering.
Args:
start_date: Start date for filtering
end_date: End date for filtering
path_pattern: URL path pattern to filter by
Returns:
QuerySet of page views
"""
queryset = PageView.objects.all()
if start_date:
queryset = queryset.filter(timestamp__gte=start_date)
if end_date:
queryset = queryset.filter(timestamp__lte=end_date)
if path_pattern:
queryset = queryset.filter(path__icontains=path_pattern)
return queryset.order_by('-timestamp')
def popular_pages_summary(*, days: int = 30) -> Dict[str, Any]:
"""
Get summary of most popular pages in the last N days.
Args:
days: Number of days to analyze
Returns:
Dictionary containing popular pages statistics
"""
cutoff_date = timezone.now() - timedelta(days=days)
# Most viewed pages
popular_pages = PageView.objects.filter(
timestamp__gte=cutoff_date
).values('path').annotate(
view_count=Count('id')
).order_by('-view_count')[:10]
# Total page views
total_views = PageView.objects.filter(
timestamp__gte=cutoff_date
).count()
# Unique visitors (based on IP)
unique_visitors = PageView.objects.filter(
timestamp__gte=cutoff_date
).values('ip_address').distinct().count()
return {
'popular_pages': list(popular_pages),
'total_views': total_views,
'unique_visitors': unique_visitors,
'period_days': days
}
def geographic_distribution_summary() -> Dict[str, Any]:
"""
Get geographic distribution statistics for all locations.
Returns:
Dictionary containing geographic statistics
"""
# Parks by country
parks_by_country = Park.objects.filter(
location__country__isnull=False
).values('location__country').annotate(
count=Count('id')
).order_by('-count')
# Rides by country (through park location)
rides_by_country = Ride.objects.filter(
park__location__country__isnull=False
).values('park__location__country').annotate(
count=Count('id')
).order_by('-count')
return {
'parks_by_country': list(parks_by_country),
'rides_by_country': list(rides_by_country)
}
def system_health_metrics() -> Dict[str, Any]:
"""
Get system health and activity metrics.
Returns:
Dictionary containing system health statistics
"""
now = timezone.now()
last_24h = now - timedelta(hours=24)
last_7d = now - timedelta(days=7)
return {
'total_parks': Park.objects.count(),
'operating_parks': Park.objects.filter(status='OPERATING').count(),
'total_rides': Ride.objects.count(),
'page_views_24h': PageView.objects.filter(timestamp__gte=last_24h).count(),
'page_views_7d': PageView.objects.filter(timestamp__gte=last_7d).count(),
        'data_freshness': {
            # Single aggregate query per model instead of exists() + first()
            'latest_park_update': Park.objects.aggregate(latest=Max('updated_at'))['latest'],
            'latest_ride_update': Ride.objects.aggregate(latest=Max('updated_at'))['latest'],
        }
}
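A short usage sketch for these selectors; the coordinates are illustrative and assume a configured GeoDjango setup:

from django.contrib.gis.geos import Point

from core.selectors import locations_near_point, search_all_locations

# Point takes (longitude, latitude)
orlando = Point(-81.5639, 28.3852, srid=4326)
nearby = locations_near_point(point=orlando, distance_km=25, limit=5)
for park in nearby['parks']:
    print(park.name, park.distance)

matches = search_all_locations(query='coaster', limit=10)
print(len(matches['parks']), len(matches['rides']))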

254
core/services/enhanced_cache_service.py Normal file
View File

@@ -0,0 +1,254 @@
"""
Enhanced caching service with multiple cache backends and strategies.
"""
from typing import Optional, Any, Dict, List, Callable
from django.core.cache import caches
from django.core.cache.utils import make_template_fragment_key
from django.conf import settings
import hashlib
import json
import logging
import time
from functools import wraps
logger = logging.getLogger(__name__)
# Define GeoBounds for type hinting
class GeoBounds:
def __init__(self, min_lat: float, min_lng: float, max_lat: float, max_lng: float):
self.min_lat = min_lat
self.min_lng = min_lng
self.max_lat = max_lat
self.max_lng = max_lng
class EnhancedCacheService:
"""Comprehensive caching service with multiple cache backends"""
def __init__(self):
self.default_cache = caches['default']
try:
self.api_cache = caches['api']
except Exception:
# Fallback to default cache if api cache not configured
self.api_cache = self.default_cache
# L1: Query-level caching
def cache_queryset(self, cache_key: str, queryset_func: Callable, timeout: int = 3600, **kwargs) -> Any:
"""Cache expensive querysets"""
cached_result = self.default_cache.get(cache_key)
if cached_result is None:
start_time = time.time()
result = queryset_func(**kwargs)
duration = time.time() - start_time
# Log cache miss and function execution time
logger.info(
f"Cache miss for key '{cache_key}', executed in {duration:.3f}s",
extra={'cache_key': cache_key, 'execution_time': duration}
)
self.default_cache.set(cache_key, result, timeout)
return result
logger.debug(f"Cache hit for key '{cache_key}'")
return cached_result
# L2: API response caching
def cache_api_response(self, view_name: str, params: Dict, response_data: Any, timeout: int = 1800):
"""Cache API responses based on view and parameters"""
cache_key = self._generate_api_cache_key(view_name, params)
self.api_cache.set(cache_key, response_data, timeout)
logger.debug(f"Cached API response for view '{view_name}'")
def get_cached_api_response(self, view_name: str, params: Dict) -> Optional[Any]:
"""Retrieve cached API response"""
cache_key = self._generate_api_cache_key(view_name, params)
        result = self.api_cache.get(cache_key)
        if result is not None:  # distinguish a miss from a cached falsy value
            logger.debug(f"Cache hit for API view '{view_name}'")
        else:
            logger.debug(f"Cache miss for API view '{view_name}'")
return result
# L3: Geographic caching (building on existing MapCacheService)
def cache_geographic_data(self, bounds: 'GeoBounds', data: Any, zoom_level: int, timeout: int = 1800):
"""Cache geographic data with spatial keys"""
# Generate spatial cache key based on bounds and zoom level
cache_key = f"geo:{bounds.min_lat}:{bounds.min_lng}:{bounds.max_lat}:{bounds.max_lng}:z{zoom_level}"
self.default_cache.set(cache_key, data, timeout)
logger.debug(f"Cached geographic data for bounds {bounds}")
def get_cached_geographic_data(self, bounds: 'GeoBounds', zoom_level: int) -> Optional[Any]:
"""Retrieve cached geographic data"""
cache_key = f"geo:{bounds.min_lat}:{bounds.min_lng}:{bounds.max_lat}:{bounds.max_lng}:z{zoom_level}"
return self.default_cache.get(cache_key)
# Cache invalidation utilities
def invalidate_pattern(self, pattern: str):
"""Invalidate cache keys matching a pattern (if backend supports it)"""
try:
# For Redis cache backends
if hasattr(self.default_cache, 'delete_pattern'):
deleted_count = self.default_cache.delete_pattern(pattern)
logger.info(f"Invalidated {deleted_count} cache keys matching pattern '{pattern}'")
return deleted_count
else:
logger.warning(f"Cache backend does not support pattern deletion for pattern '{pattern}'")
except Exception as e:
logger.error(f"Error invalidating cache pattern '{pattern}': {e}")
def invalidate_model_cache(self, model_name: str, instance_id: Optional[int] = None):
"""Invalidate cache keys related to a specific model"""
if instance_id:
pattern = f"*{model_name}:{instance_id}*"
else:
pattern = f"*{model_name}*"
self.invalidate_pattern(pattern)
# Cache warming utilities
def warm_cache(self, cache_key: str, warm_func: Callable, timeout: int = 3600, **kwargs):
"""Proactively warm cache with data"""
try:
data = warm_func(**kwargs)
self.default_cache.set(cache_key, data, timeout)
logger.info(f"Warmed cache for key '{cache_key}'")
except Exception as e:
logger.error(f"Error warming cache for key '{cache_key}': {e}")
def _generate_api_cache_key(self, view_name: str, params: Dict) -> str:
"""Generate consistent cache keys for API responses"""
# Sort params to ensure consistent key generation
params_str = json.dumps(params, sort_keys=True, default=str)
params_hash = hashlib.md5(params_str.encode()).hexdigest()
return f"api:{view_name}:{params_hash}"
# Cache decorators
def cache_api_response(timeout=1800, vary_on=None, key_prefix=''):
"""Decorator for caching API responses"""
def decorator(view_func):
@wraps(view_func)
def wrapper(self, request, *args, **kwargs):
if request.method != 'GET':
return view_func(self, request, *args, **kwargs)
            # Generate cache key based on view, user, and parameters
            cache_key_parts = [
                key_prefix or view_func.__name__,
                str(request.user.id) if request.user.is_authenticated else 'anonymous',
                # hash() is randomized per process; use a stable digest so keys
                # are shared across workers
                hashlib.md5(json.dumps(sorted(request.GET.items()), default=str).encode()).hexdigest(),
            ]
if vary_on:
for field in vary_on:
cache_key_parts.append(str(getattr(request, field, '')))
cache_key = ':'.join(cache_key_parts)
# Try to get from cache
cache_service = EnhancedCacheService()
            cached_response = cache_service.api_cache.get(cache_key)
            if cached_response is not None:
                logger.debug(f"Cache hit for API view {view_func.__name__}")
                return cached_response
# Execute view and cache result
response = view_func(self, request, *args, **kwargs)
if hasattr(response, 'status_code') and response.status_code == 200:
cache_service.api_cache.set(cache_key, response, timeout)
logger.debug(f"Cached API response for view {view_func.__name__}")
return response
return wrapper
return decorator
def cache_queryset_result(cache_key_template: str, timeout: int = 3600):
"""Decorator for caching queryset results"""
def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Generate cache key from template and arguments
            cache_key = cache_key_template.format(*args, **kwargs)
            cache_service = EnhancedCacheService()
            # cache_queryset() forwards keyword arguments only, so close over
            # the original call to keep positional arguments intact
            return cache_service.cache_queryset(cache_key, lambda: func(*args, **kwargs), timeout)
return wrapper
return decorator
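A hedged sketch of the two decorators in use; the view and helper below are illustrative, not part of this commit. Note that caching an unrendered DRF Response works with the in-memory backend, but a serializing backend such as Redis generally needs the response rendered first:

from rest_framework.response import Response
from rest_framework.views import APIView

class ParkStatsView(APIView):
    @cache_api_response(timeout=600, key_prefix='park_stats')
    def get(self, request):
        return Response({'operating_parks': 123})

@cache_queryset_result('park_count:{0}', timeout=300)
def count_parks_with_status(status):
    from parks.models import Park
    return Park.objects.filter(status=status).count()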
# Context manager for cache warming
class CacheWarmer:
"""Context manager for batch cache warming operations"""
def __init__(self):
self.cache_service = EnhancedCacheService()
self.warm_operations = []
def add(self, cache_key: str, warm_func: Callable, timeout: int = 3600, **kwargs):
"""Add a cache warming operation to the batch"""
self.warm_operations.append({
'cache_key': cache_key,
'warm_func': warm_func,
'timeout': timeout,
'kwargs': kwargs
})
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Execute all cache warming operations"""
logger.info(f"Warming {len(self.warm_operations)} cache entries")
for operation in self.warm_operations:
try:
self.cache_service.warm_cache(**operation)
except Exception as e:
logger.error(f"Error warming cache for {operation['cache_key']}: {e}")
# Cache statistics and monitoring
class CacheMonitor:
"""Monitor cache performance and statistics"""
def __init__(self):
self.cache_service = EnhancedCacheService()
def get_cache_stats(self) -> Dict[str, Any]:
"""Get cache statistics if available"""
stats = {}
try:
# Redis cache stats
if hasattr(self.cache_service.default_cache, '_cache'):
redis_client = self.cache_service.default_cache._cache.get_client()
info = redis_client.info()
stats['redis'] = {
'used_memory': info.get('used_memory_human'),
'connected_clients': info.get('connected_clients'),
'total_commands_processed': info.get('total_commands_processed'),
'keyspace_hits': info.get('keyspace_hits'),
'keyspace_misses': info.get('keyspace_misses'),
}
# Calculate hit rate
hits = info.get('keyspace_hits', 0)
misses = info.get('keyspace_misses', 0)
if hits + misses > 0:
stats['redis']['hit_rate'] = hits / (hits + misses) * 100
except Exception as e:
logger.error(f"Error getting cache stats: {e}")
return stats
def log_cache_performance(self):
"""Log cache performance metrics"""
stats = self.get_cache_stats()
if stats:
logger.info("Cache performance statistics", extra=stats)
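A usage sketch for the warming and monitoring helpers; the summary function is illustrative:

from core.services.enhanced_cache_service import CacheMonitor, CacheWarmer

def park_summary():
    from parks.models import Park
    return list(Park.objects.values('id', 'name')[:100])

# Warm a batch of keys in one pass; failures are logged, not raised
with CacheWarmer() as warmer:
    warmer.add('park_summary:v1', park_summary, timeout=900)

# Emit backend statistics (Redis hit rate etc.) to the log
CacheMonitor().log_cache_performance()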

View File

@@ -39,6 +39,7 @@ class UnifiedMapService:
def get_map_data(
self,
*,
bounds: Optional[GeoBounds] = None,
filters: Optional[MapFilters] = None,
zoom_level: int = DEFAULT_ZOOM_LEVEL,

View File

@@ -0,0 +1,370 @@
"""
Performance monitoring utilities and context managers.
"""
import time
import logging
from contextlib import contextmanager
from functools import wraps
from typing import Optional, Dict, Any, List
from django.db import connection
from django.conf import settings
from django.utils import timezone
logger = logging.getLogger('performance')
@contextmanager
def monitor_performance(operation_name: str, **tags):
"""Context manager for monitoring operation performance"""
start_time = time.time()
initial_queries = len(connection.queries)
# Create performance context
performance_context = {
'operation': operation_name,
'start_time': start_time,
'timestamp': timezone.now().isoformat(),
**tags
}
try:
yield performance_context
except Exception as e:
performance_context['error'] = str(e)
performance_context['status'] = 'error'
raise
else:
performance_context['status'] = 'success'
finally:
end_time = time.time()
duration = end_time - start_time
total_queries = len(connection.queries) - initial_queries
# Update performance context with final metrics
performance_context.update({
'duration_seconds': duration,
'duration_ms': round(duration * 1000, 2),
'query_count': total_queries,
'end_time': end_time,
})
# Log performance data
log_level = logging.WARNING if duration > 2.0 or total_queries > 10 else logging.INFO
logger.log(
log_level,
f"Performance: {operation_name} completed in {duration:.3f}s with {total_queries} queries",
extra=performance_context
)
# Log slow operations with additional detail
if duration > 2.0:
logger.warning(
f"Slow operation detected: {operation_name} took {duration:.3f}s",
extra={
'slow_operation': True,
'threshold_exceeded': 'duration',
**performance_context
}
)
if total_queries > 10:
logger.warning(
f"High query count: {operation_name} executed {total_queries} queries",
extra={
'high_query_count': True,
'threshold_exceeded': 'query_count',
**performance_context
}
)
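A sketch of the context manager in use; import_parks is a hypothetical helper, and the extra keyword arguments become tags on the log record:

def import_parks():
    return 1200  # hypothetical import job

with monitor_performance('park_import', source='csv') as ctx:
    rows = import_parks()
    ctx['rows_imported'] = rows  # extra context ends up in the final log entry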
@contextmanager
def track_queries(operation_name: str, warn_threshold: int = 10):
"""Context manager to track database queries for specific operations"""
if not settings.DEBUG:
yield
return
initial_queries = len(connection.queries)
start_time = time.time()
try:
yield
finally:
end_time = time.time()
total_queries = len(connection.queries) - initial_queries
execution_time = end_time - start_time
query_details = []
if hasattr(connection, 'queries') and total_queries > 0:
recent_queries = connection.queries[-total_queries:]
query_details = [
{
'sql': query['sql'][:200] + '...' if len(query['sql']) > 200 else query['sql'],
'time': float(query['time'])
}
for query in recent_queries
]
performance_data = {
'operation': operation_name,
'query_count': total_queries,
'execution_time': execution_time,
'queries': query_details if settings.DEBUG else []
}
if total_queries > warn_threshold or execution_time > 1.0:
logger.warning(
f"Performance concern in {operation_name}: "
f"{total_queries} queries, {execution_time:.2f}s",
extra=performance_data
)
else:
logger.debug(
f"Query tracking for {operation_name}: "
f"{total_queries} queries, {execution_time:.2f}s",
extra=performance_data
)
class PerformanceProfiler:
"""Advanced performance profiling with detailed metrics"""
def __init__(self, name: str):
self.name = name
self.start_time = None
self.end_time = None
self.checkpoints = []
self.initial_queries = 0
self.memory_usage = {}
def start(self):
"""Start profiling"""
self.start_time = time.time()
self.initial_queries = len(connection.queries)
# Track memory usage if psutil is available
try:
import psutil
process = psutil.Process()
self.memory_usage['start'] = process.memory_info().rss
except ImportError:
pass
logger.debug(f"Started profiling: {self.name}")
def checkpoint(self, name: str):
"""Add a checkpoint"""
if self.start_time is None:
logger.warning(f"Checkpoint '{name}' called before profiling started")
return
current_time = time.time()
elapsed = current_time - self.start_time
queries_since_start = len(connection.queries) - self.initial_queries
checkpoint = {
'name': name,
'timestamp': current_time,
'elapsed_seconds': elapsed,
'queries_since_start': queries_since_start,
}
# Memory usage if available
try:
import psutil
process = psutil.Process()
checkpoint['memory_rss'] = process.memory_info().rss
except ImportError:
pass
self.checkpoints.append(checkpoint)
logger.debug(f"Checkpoint '{name}' at {elapsed:.3f}s")
def stop(self):
"""Stop profiling and log results"""
if self.start_time is None:
logger.warning("Profiling stopped before it was started")
return
self.end_time = time.time()
total_duration = self.end_time - self.start_time
total_queries = len(connection.queries) - self.initial_queries
# Final memory usage
try:
import psutil
process = psutil.Process()
self.memory_usage['end'] = process.memory_info().rss
except ImportError:
pass
# Create detailed profiling report
report = {
'profiler_name': self.name,
'total_duration': total_duration,
'total_queries': total_queries,
'checkpoints': self.checkpoints,
'memory_usage': self.memory_usage,
'queries_per_second': total_queries / total_duration if total_duration > 0 else 0,
}
# Calculate checkpoint intervals
if len(self.checkpoints) > 1:
intervals = []
for i in range(1, len(self.checkpoints)):
prev = self.checkpoints[i-1]
curr = self.checkpoints[i]
intervals.append({
'from': prev['name'],
'to': curr['name'],
'duration': curr['elapsed_seconds'] - prev['elapsed_seconds'],
'queries': curr['queries_since_start'] - prev['queries_since_start'],
})
report['checkpoint_intervals'] = intervals
# Log the complete report
log_level = logging.WARNING if total_duration > 1.0 else logging.INFO
logger.log(
log_level,
f"Profiling complete: {self.name} took {total_duration:.3f}s with {total_queries} queries",
extra=report
)
return report
@contextmanager
def profile_operation(name: str):
"""Context manager for detailed operation profiling"""
profiler = PerformanceProfiler(name)
profiler.start()
try:
yield profiler
finally:
profiler.stop()
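Checkpoints make it easy to see which phase of an operation dominates. A sketch, assuming the parks and rides models from this project:

from parks.models import Park
from rides.models import Ride

with profile_operation('homepage_data') as profiler:
    parks = list(Park.objects.all()[:50])
    profiler.checkpoint('parks_loaded')
    rides = list(Ride.objects.all()[:50])
    profiler.checkpoint('rides_loaded')
# stop() runs on exit and logs per-checkpoint intervals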
class DatabaseQueryAnalyzer:
"""Analyze database query patterns and performance"""
@staticmethod
def analyze_queries(queries: List[Dict]) -> Dict[str, Any]:
"""Analyze a list of queries for patterns and issues"""
if not queries:
return {}
total_time = sum(float(q.get('time', 0)) for q in queries)
query_count = len(queries)
# Group queries by type
query_types = {}
for query in queries:
sql = query.get('sql', '').strip().upper()
query_type = sql.split()[0] if sql else 'UNKNOWN'
query_types[query_type] = query_types.get(query_type, 0) + 1
# Find slow queries (top 10% by time)
sorted_queries = sorted(queries, key=lambda q: float(q.get('time', 0)), reverse=True)
slow_query_count = max(1, query_count // 10)
slow_queries = sorted_queries[:slow_query_count]
# Detect duplicate queries
query_signatures = {}
for query in queries:
            # Simplified signature: whitespace-normalized SQL (literals are kept,
            # so only byte-identical statements count as duplicates)
sql = query.get('sql', '')
signature = ' '.join(sql.split()) # Normalize whitespace
query_signatures[signature] = query_signatures.get(signature, 0) + 1
duplicates = {sig: count for sig, count in query_signatures.items() if count > 1}
analysis = {
'total_queries': query_count,
'total_time': total_time,
'average_time': total_time / query_count if query_count > 0 else 0,
'query_types': query_types,
'slow_queries': [
{
'sql': q.get('sql', '')[:200] + '...' if len(q.get('sql', '')) > 200 else q.get('sql', ''),
'time': float(q.get('time', 0))
}
for q in slow_queries
],
'duplicate_query_count': len(duplicates),
'duplicate_queries': duplicates if len(duplicates) <= 10 else dict(list(duplicates.items())[:10]),
}
return analysis
@classmethod
def analyze_current_queries(cls) -> Dict[str, Any]:
"""Analyze the current request's queries"""
if hasattr(connection, 'queries'):
return cls.analyze_queries(connection.queries)
return {}
# Performance monitoring decorators
def monitor_function_performance(operation_name: Optional[str] = None):
"""Decorator to monitor function performance"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
name = operation_name or f"{func.__module__}.{func.__name__}"
with monitor_performance(name, function=func.__name__, module=func.__module__):
return func(*args, **kwargs)
return wrapper
return decorator
def track_database_queries(warn_threshold: int = 10):
"""Decorator to track database queries for a function"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
operation_name = f"{func.__module__}.{func.__name__}"
with track_queries(operation_name, warn_threshold):
return func(*args, **kwargs)
return wrapper
return decorator
# Performance metrics collection
class PerformanceMetrics:
"""Collect and aggregate performance metrics"""
def __init__(self):
self.metrics = []
def record_metric(self, name: str, value: float, tags: Optional[Dict] = None):
"""Record a performance metric"""
metric = {
'name': name,
'value': value,
'timestamp': timezone.now().isoformat(),
'tags': tags or {}
}
self.metrics.append(metric)
# Log the metric
logger.info(
f"Performance metric: {name} = {value}",
extra=metric
)
def get_metrics(self, name: Optional[str] = None) -> List[Dict]:
"""Get recorded metrics, optionally filtered by name"""
if name:
return [m for m in self.metrics if m['name'] == name]
return self.metrics.copy()
def clear_metrics(self):
"""Clear all recorded metrics"""
self.metrics.clear()
# Global performance metrics instance
performance_metrics = PerformanceMetrics()
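Callers can record ad-hoc metrics against the shared instance, for example:

performance_metrics.record_metric('map_tile_render_ms', 42.5, tags={'zoom': 12})
slow_renders = [
    m for m in performance_metrics.get_metrics('map_tile_render_ms')
    if m['value'] > 100
]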

1
core/utils/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Core utilities

385
core/utils/query_optimization.py Normal file
View File

@@ -0,0 +1,385 @@
"""
Database query optimization utilities and helpers.
"""
import time
import logging
from contextlib import contextmanager
from typing import Optional, Dict, Any, List, Type
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, models
from django.db.models.fields.reverse_related import ManyToManyRel
from django.db.models import QuerySet, Prefetch, Count, Avg, Max, Min
from django.conf import settings
from django.core.cache import cache
logger = logging.getLogger('query_optimization')
@contextmanager
def track_queries(operation_name: str, warn_threshold: int = 10, time_threshold: float = 1.0):
"""
Context manager to track database queries for specific operations
Args:
operation_name: Name of the operation being tracked
warn_threshold: Number of queries that triggers a warning
time_threshold: Execution time in seconds that triggers a warning
"""
if not settings.DEBUG:
yield
return
initial_queries = len(connection.queries)
start_time = time.time()
try:
yield
finally:
end_time = time.time()
total_queries = len(connection.queries) - initial_queries
execution_time = end_time - start_time
# Collect query details
query_details = []
if hasattr(connection, 'queries') and total_queries > 0:
recent_queries = connection.queries[-total_queries:]
query_details = [
{
'sql': query['sql'][:500] + '...' if len(query['sql']) > 500 else query['sql'],
'time': float(query['time']),
'duplicate_count': sum(1 for q in recent_queries if q['sql'] == query['sql'])
}
for query in recent_queries
]
performance_data = {
'operation': operation_name,
'query_count': total_queries,
'execution_time': execution_time,
'queries': query_details if settings.DEBUG else [],
'slow_queries': [q for q in query_details if q['time'] > 0.1], # Queries slower than 100ms
}
# Log warnings for performance issues
if total_queries > warn_threshold or execution_time > time_threshold:
logger.warning(
f"Performance concern in {operation_name}: "
f"{total_queries} queries, {execution_time:.2f}s",
extra=performance_data
)
else:
logger.debug(
f"Query tracking for {operation_name}: "
f"{total_queries} queries, {execution_time:.2f}s",
extra=performance_data
)
class QueryOptimizer:
"""Utility class for common query optimization patterns"""
@staticmethod
def optimize_park_queryset(queryset: QuerySet) -> QuerySet:
"""
Optimize Park queryset with proper select_related and prefetch_related
"""
return queryset.select_related(
'location',
'operator',
'created_by'
).prefetch_related(
'areas',
'rides__manufacturer',
'reviews__user'
        ).annotate(
            # distinct=True avoids inflated counts when several multi-valued
            # relations are joined in the same query
            ride_count=Count('rides', distinct=True),
            average_rating=Avg('reviews__rating'),
            latest_review_date=Max('reviews__created_at')
        )
@staticmethod
def optimize_ride_queryset(queryset: QuerySet) -> QuerySet:
"""
Optimize Ride queryset with proper relationships
"""
return queryset.select_related(
'park',
'park__location',
'manufacturer',
'created_by'
).prefetch_related(
'reviews__user',
'media_items'
).annotate(
review_count=Count('reviews'),
average_rating=Avg('reviews__rating'),
latest_review_date=Max('reviews__created_at')
)
@staticmethod
def optimize_user_queryset(queryset: QuerySet) -> QuerySet:
"""
Optimize User queryset for profile views
"""
return queryset.prefetch_related(
Prefetch('park_reviews', to_attr='cached_park_reviews'),
Prefetch('ride_reviews', to_attr='cached_ride_reviews'),
'authored_parks',
'authored_rides'
        ).annotate(
            # distinct=True prevents the cartesian product of the joined
            # relations from inflating each count
            total_reviews=Count('park_reviews', distinct=True) + Count('ride_reviews', distinct=True),
            parks_authored=Count('authored_parks', distinct=True),
            rides_authored=Count('authored_rides', distinct=True)
        )
@staticmethod
def create_bulk_queryset(model: Type[models.Model], ids: List[int]) -> QuerySet:
"""
Create an optimized queryset for bulk operations
"""
queryset = model.objects.filter(id__in=ids)
# Apply model-specific optimizations
if hasattr(model, '_meta') and model._meta.model_name == 'park':
return QueryOptimizer.optimize_park_queryset(queryset)
elif hasattr(model, '_meta') and model._meta.model_name == 'ride':
return QueryOptimizer.optimize_ride_queryset(queryset)
elif hasattr(model, '_meta') and model._meta.model_name == 'user':
return QueryOptimizer.optimize_user_queryset(queryset)
return queryset
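A usage sketch; because the annotations and related objects are loaded up front, iterating the result issues no per-row queries:

from core.utils.query_optimization import QueryOptimizer
from parks.models import Park

parks = QueryOptimizer.optimize_park_queryset(
    Park.objects.filter(status='OPERATING')
)
for park in parks:
    print(park.name, park.ride_count, park.average_rating)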
class QueryCache:
"""Caching utilities for expensive queries"""
@staticmethod
def cache_queryset_result(cache_key: str, queryset_func, timeout: int = 3600, **kwargs):
"""
Cache the result of an expensive queryset operation
Args:
cache_key: Unique key for caching
queryset_func: Function that returns the queryset result
timeout: Cache timeout in seconds
**kwargs: Arguments to pass to queryset_func
"""
# Try to get from cache first
cached_result = cache.get(cache_key)
if cached_result is not None:
logger.debug(f"Cache hit for queryset: {cache_key}")
return cached_result
# Execute the expensive operation
with track_queries(f"cache_miss_{cache_key}"):
result = queryset_func(**kwargs)
# Cache the result
cache.set(cache_key, result, timeout)
logger.debug(f"Cached queryset result: {cache_key}")
return result
@staticmethod
def invalidate_model_cache(model_name: str, instance_id: Optional[int] = None):
"""
Invalidate cache keys related to a specific model
Args:
model_name: Name of the model (e.g., 'park', 'ride')
instance_id: Specific instance ID, if applicable
"""
# Pattern-based cache invalidation (works with Redis)
if instance_id:
pattern = f"*{model_name}_{instance_id}*"
else:
pattern = f"*{model_name}*"
try:
# For Redis cache backends that support pattern deletion
if hasattr(cache, 'delete_pattern'):
deleted_count = cache.delete_pattern(pattern)
logger.info(f"Invalidated {deleted_count} cache keys for pattern: {pattern}")
else:
logger.warning(f"Cache backend does not support pattern deletion: {pattern}")
except Exception as e:
logger.error(f"Error invalidating cache pattern {pattern}: {e}")
class IndexAnalyzer:
"""Analyze and suggest database indexes"""
@staticmethod
def analyze_slow_queries(min_time: float = 0.1) -> List[Dict[str, Any]]:
"""
Analyze slow queries from the current request
Args:
min_time: Minimum query time in seconds to consider "slow"
"""
if not hasattr(connection, 'queries'):
return []
slow_queries = []
for query in connection.queries:
query_time = float(query.get('time', 0))
if query_time >= min_time:
slow_queries.append({
'sql': query['sql'],
'time': query_time,
'analysis': IndexAnalyzer._analyze_query_sql(query['sql'])
})
return slow_queries
@staticmethod
def _analyze_query_sql(sql: str) -> Dict[str, Any]:
"""
Analyze SQL to suggest potential optimizations
"""
sql_upper = sql.upper()
analysis = {
'has_where_clause': 'WHERE' in sql_upper,
'has_join': any(join in sql_upper for join in ['JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN']),
'has_order_by': 'ORDER BY' in sql_upper,
'has_group_by': 'GROUP BY' in sql_upper,
'has_like': 'LIKE' in sql_upper,
'table_scans': [],
'suggestions': []
}
# Detect potential table scans
if 'WHERE' not in sql_upper and 'SELECT COUNT(*) FROM' not in sql_upper:
analysis['table_scans'].append("Query may be doing a full table scan")
# Suggest indexes based on patterns
if analysis['has_where_clause'] and not analysis['has_join']:
analysis['suggestions'].append("Consider adding indexes on WHERE clause columns")
if analysis['has_order_by']:
analysis['suggestions'].append("Consider adding indexes on ORDER BY columns")
        if analysis['has_like']:
            like_pos = sql_upper.find('LIKE')
            pattern_start = sql_upper.find("'", like_pos)
            # Flag patterns such as LIKE '%term', which defeat b-tree indexes
            if pattern_start != -1 and sql_upper[pattern_start + 1:pattern_start + 2] == '%':
                analysis['suggestions'].append("LIKE patterns with a leading wildcard cannot use indexes efficiently")
return analysis
@staticmethod
def suggest_model_indexes(model: Type[models.Model]) -> List[str]:
"""
Suggest database indexes for a Django model based on its fields
"""
suggestions = []
opts = model._meta
# Foreign key fields should have indexes (Django adds these automatically)
for field in opts.fields:
if isinstance(field, models.ForeignKey):
suggestions.append(f"Index on {field.name} (automatically created by Django)")
# Suggest composite indexes for common query patterns
date_fields = [f.name for f in opts.fields if isinstance(f, (models.DateField, models.DateTimeField))]
status_fields = [f.name for f in opts.fields if f.name in ['status', 'is_active', 'is_published']]
if date_fields and status_fields:
for date_field in date_fields:
for status_field in status_fields:
suggestions.append(f"Composite index on ({status_field}, {date_field}) for filtered date queries")
# Suggest indexes for fields commonly used in WHERE clauses
common_filter_fields = ['slug', 'name', 'created_at', 'updated_at']
for field in opts.fields:
if field.name in common_filter_fields and not field.db_index:
suggestions.append(f"Consider adding db_index=True to {field.name}")
return suggestions
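For example, printing suggestions for a model during development:

from core.utils.query_optimization import IndexAnalyzer
from parks.models import Park

for suggestion in IndexAnalyzer.suggest_model_indexes(Park):
    print(suggestion)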
def log_query_performance():
"""Decorator to log query performance for a function"""
def decorator(func):
def wrapper(*args, **kwargs):
operation_name = f"{func.__module__}.{func.__name__}"
with track_queries(operation_name):
return func(*args, **kwargs)
return wrapper
return decorator
def optimize_queryset_for_serialization(queryset: QuerySet, fields: List[str]) -> QuerySet:
"""
Optimize a queryset for API serialization by only selecting needed fields
Args:
queryset: The queryset to optimize
fields: List of field names that will be serialized
"""
# Extract foreign key fields that need select_related
model = queryset.model
opts = model._meta
select_related_fields = []
prefetch_related_fields = []
for field_name in fields:
        try:
            field = opts.get_field(field_name)
            if isinstance(field, models.ForeignKey):
                select_related_fields.append(field_name)
            elif isinstance(field, (models.ManyToManyField, ManyToManyRel)):
                prefetch_related_fields.append(field_name)
        except FieldDoesNotExist:
            # Field might be a property or method, skip optimization
            continue
# Apply optimizations
if select_related_fields:
queryset = queryset.select_related(*select_related_fields)
if prefetch_related_fields:
queryset = queryset.prefetch_related(*prefetch_related_fields)
return queryset
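A sketch pairing this helper with a serializer's field list; the field names are illustrative:

from rides.models import Ride

fields = ['name', 'park', 'manufacturer', 'category']
queryset = optimize_queryset_for_serialization(Ride.objects.all(), fields)
# 'park' and 'manufacturer' are foreign keys, so they are select_related here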
# Query performance monitoring context manager
@contextmanager
def monitor_db_performance(operation_name: str):
"""
Context manager that monitors database performance for an operation
"""
initial_queries = len(connection.queries) if hasattr(connection, 'queries') else 0
start_time = time.time()
try:
yield
finally:
end_time = time.time()
duration = end_time - start_time
if hasattr(connection, 'queries'):
total_queries = len(connection.queries) - initial_queries
# Analyze queries for performance issues
slow_queries = IndexAnalyzer.analyze_slow_queries(0.05) # 50ms threshold
performance_data = {
'operation': operation_name,
'duration': duration,
'query_count': total_queries,
'slow_query_count': len(slow_queries),
'slow_queries': slow_queries[:5] # Limit to top 5 slow queries
}
# Log performance data
if duration > 1.0 or total_queries > 15 or slow_queries:
logger.warning(
f"Performance issue in {operation_name}: "
f"{duration:.3f}s, {total_queries} queries, {len(slow_queries)} slow",
extra=performance_data
)
else:
logger.debug(
f"DB performance for {operation_name}: "
f"{duration:.3f}s, {total_queries} queries",
extra=performance_data
)

View File

@@ -1,2 +1 @@
from .search import *
from .views import *
# Core views

256
core/views/health_views.py Normal file
View File

@@ -0,0 +1,256 @@
"""
Enhanced health check views for API monitoring.
"""
import time
from django.http import JsonResponse
from django.utils import timezone
from django.views import View
from django.conf import settings
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from health_check.views import MainView
from core.services.enhanced_cache_service import CacheMonitor
from core.utils.query_optimization import IndexAnalyzer
class HealthCheckAPIView(APIView):
"""
Enhanced API endpoint for health checks with detailed JSON response
"""
permission_classes = [AllowAny] # Public endpoint
def get(self, request):
"""Return comprehensive health check information"""
start_time = time.time()
# Get basic health check results
main_view = MainView()
main_view.request = request
plugins = main_view.plugins
errors = main_view.errors
# Collect additional performance metrics
cache_monitor = CacheMonitor()
cache_stats = cache_monitor.get_cache_stats()
# Build comprehensive health data
health_data = {
'status': 'healthy' if not errors else 'unhealthy',
'timestamp': timezone.now().isoformat(),
'version': getattr(settings, 'VERSION', '1.0.0'),
'environment': getattr(settings, 'ENVIRONMENT', 'development'),
'response_time_ms': 0, # Will be calculated at the end
'checks': {},
'metrics': {
'cache': cache_stats,
'database': self._get_database_metrics(),
'system': self._get_system_metrics(),
}
}
# Process individual health checks
for plugin in plugins:
plugin_name = plugin.identifier()
plugin_errors = errors.get(plugin.__class__.__name__, [])
health_data['checks'][plugin_name] = {
'status': 'healthy' if not plugin_errors else 'unhealthy',
'critical': getattr(plugin, 'critical_service', False),
'errors': [str(error) for error in plugin_errors],
'response_time_ms': getattr(plugin, '_response_time', None)
}
# Calculate total response time
health_data['response_time_ms'] = round((time.time() - start_time) * 1000, 2)
# Determine HTTP status code
status_code = 200
if errors:
# Check if any critical services are failing
critical_errors = any(
getattr(plugin, 'critical_service', False)
for plugin in plugins
if errors.get(plugin.__class__.__name__)
)
status_code = 503 if critical_errors else 200
return Response(health_data, status=status_code)
def _get_database_metrics(self):
"""Get database performance metrics"""
try:
from django.db import connection
# Get basic connection info
metrics = {
'vendor': connection.vendor,
'connection_status': 'connected',
}
# Test query performance
start_time = time.time()
with connection.cursor() as cursor:
cursor.execute("SELECT 1")
cursor.fetchone()
query_time = (time.time() - start_time) * 1000
metrics['test_query_time_ms'] = round(query_time, 2)
# PostgreSQL specific metrics
if connection.vendor == 'postgresql':
try:
with connection.cursor() as cursor:
cursor.execute("""
SELECT
numbackends as active_connections,
xact_commit as transactions_committed,
xact_rollback as transactions_rolled_back,
blks_read as blocks_read,
blks_hit as blocks_hit
FROM pg_stat_database
WHERE datname = current_database()
""")
row = cursor.fetchone()
if row:
metrics.update({
'active_connections': row[0],
'transactions_committed': row[1],
'transactions_rolled_back': row[2],
'cache_hit_ratio': round((row[4] / (row[3] + row[4])) * 100, 2) if (row[3] + row[4]) > 0 else 0
})
except Exception:
pass # Skip advanced metrics if not available
return metrics
except Exception as e:
return {
'connection_status': 'error',
'error': str(e)
}
def _get_system_metrics(self):
"""Get system performance metrics"""
metrics = {
'debug_mode': settings.DEBUG,
'allowed_hosts': settings.ALLOWED_HOSTS if settings.DEBUG else ['hidden'],
}
try:
import psutil
# Memory metrics
memory = psutil.virtual_memory()
metrics['memory'] = {
'total_mb': round(memory.total / 1024 / 1024, 2),
'available_mb': round(memory.available / 1024 / 1024, 2),
'percent_used': memory.percent,
}
# CPU metrics
metrics['cpu'] = {
'percent_used': psutil.cpu_percent(interval=0.1),
'core_count': psutil.cpu_count(),
}
# Disk metrics
disk = psutil.disk_usage('/')
metrics['disk'] = {
'total_gb': round(disk.total / 1024 / 1024 / 1024, 2),
'free_gb': round(disk.free / 1024 / 1024 / 1024, 2),
'percent_used': round((disk.used / disk.total) * 100, 2),
}
except ImportError:
metrics['system_monitoring'] = 'psutil not available'
except Exception as e:
metrics['system_error'] = str(e)
return metrics
class PerformanceMetricsView(APIView):
"""
API view for performance metrics and database analysis
"""
    # An empty permission list would also grant access, so keep AllowAny here;
    # the real gate is the settings.DEBUG check in get()
    permission_classes = [AllowAny]
def get(self, request):
"""Return performance metrics and analysis"""
if not settings.DEBUG:
return Response({'error': 'Only available in debug mode'}, status=403)
metrics = {
'timestamp': timezone.now().isoformat(),
'database_analysis': self._get_database_analysis(),
'cache_performance': self._get_cache_performance(),
'recent_slow_queries': self._get_slow_queries(),
}
return Response(metrics)
def _get_database_analysis(self):
"""Analyze database performance"""
try:
from django.db import connection
analysis = {
'total_queries': len(connection.queries),
'query_analysis': IndexAnalyzer.analyze_slow_queries(0.05),
}
if connection.queries:
query_times = [float(q.get('time', 0)) for q in connection.queries]
analysis.update({
'total_query_time': sum(query_times),
'average_query_time': sum(query_times) / len(query_times),
'slowest_query_time': max(query_times),
'fastest_query_time': min(query_times),
})
return analysis
except Exception as e:
return {'error': str(e)}
def _get_cache_performance(self):
"""Get cache performance metrics"""
try:
cache_monitor = CacheMonitor()
return cache_monitor.get_cache_stats()
except Exception as e:
return {'error': str(e)}
def _get_slow_queries(self):
"""Get recent slow queries"""
try:
return IndexAnalyzer.analyze_slow_queries(0.1) # 100ms threshold
except Exception as e:
return {'error': str(e)}
class SimpleHealthView(View):
"""
Simple health check endpoint for load balancers
"""
def get(self, request):
"""Return simple OK status"""
try:
# Basic database connectivity test
from django.db import connection
with connection.cursor() as cursor:
cursor.execute("SELECT 1")
cursor.fetchone()
return JsonResponse({'status': 'ok', 'timestamp': timezone.now().isoformat()})
except Exception as e:
return JsonResponse(
{'status': 'error', 'error': str(e), 'timestamp': timezone.now().isoformat()},
status=503
)
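A hypothetical URL wiring for the three views above; the paths and URLconf module are not part of this commit:

# urls.py (sketch)
from django.urls import path

from core.views.health_views import (
    HealthCheckAPIView,
    PerformanceMetricsView,
    SimpleHealthView,
)

urlpatterns = [
    path('health/', SimpleHealthView.as_view()),                     # load balancers
    path('api/health/', HealthCheckAPIView.as_view()),               # detailed JSON
    path('api/health/metrics/', PerformanceMetricsView.as_view()),   # DEBUG only
]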