Add comprehensive tests for Parks API and models

- Implemented extensive test cases for the Parks API, covering endpoints for listing, retrieving, creating, updating, and deleting parks.
- Added tests for filtering, searching, and ordering parks in the API.
- Created tests for error handling in the API, including malformed JSON and unsupported methods.
- Developed model tests for Park, ParkArea, Company, and ParkReview models, ensuring validation and constraints are enforced.
- Introduced utility mixins for API and model testing to streamline assertions and enhance test readability (see the sketch after this list).
- Included integration tests to validate complete workflows involving park creation, retrieval, updating, and deletion.
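
For illustration, a minimal sketch of the test style described above, assuming DRF's APITestCase plus a shared helper mixin (the mixin name, endpoint URL, and Park fields are assumptions, not taken from this commit):

# Hypothetical sketch: a shared helper mixin plus a list-endpoint test case.
from rest_framework import status
from rest_framework.test import APITestCase

from parks.models import Park


class ParkAPITestMixin:
    """Hypothetical helper mixin; assumes Park can be created with only a name."""

    def create_park(self, name="Test Park"):
        return Park.objects.create(name=name)

    def assert_ok(self, response):
        self.assertEqual(response.status_code, status.HTTP_200_OK)


class ParkListAPITests(ParkAPITestMixin, APITestCase):
    def test_list_parks(self):
        self.create_park("Alpha Park")
        response = self.client.get("/api/parks/")  # assumed endpoint URL
        self.assert_ok(response)

    def test_unsupported_method_returns_405(self):
        response = self.client.put("/api/parks/")  # list endpoints reject PUT
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
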
Author: pacnpal
Date:   2025-08-17 19:36:20 -04:00
parent  17228e9935
commit  c26414ff74
210 changed files with 24155 additions and 833 deletions

@@ -0,0 +1 @@
# Health checks module

@@ -0,0 +1,275 @@
"""
Custom health checks for ThrillWiki application.
"""
import time
import logging
from django.core.cache import cache
from django.db import connection
from health_check.backends import BaseHealthCheckBackend
from health_check.exceptions import ServiceUnavailable, ServiceReturnedUnexpectedResult
logger = logging.getLogger(__name__)
class CacheHealthCheck(BaseHealthCheckBackend):
    """Check Redis cache connectivity and performance"""

    critical_service = True

    def check_status(self):
        try:
            # Test cache write/read performance
            test_key = 'health_check_test'
            test_value = 'test_value_' + str(int(time.time()))

            start_time = time.time()
            cache.set(test_key, test_value, timeout=30)
            cached_value = cache.get(test_key)
            cache_time = time.time() - start_time

            if cached_value != test_value:
                self.add_error("Cache read/write test failed - values don't match")
                return

            # Check cache performance
            if cache_time > 0.1:  # Warn if cache operations take more than 100ms
                self.add_error(f"Cache performance degraded: {cache_time:.3f}s for read/write operation")
                return

            # Clean up test key
            cache.delete(test_key)

            # Additional Redis-specific checks if using django-redis
            try:
                from django_redis import get_redis_connection

                redis_client = get_redis_connection("default")
                info = redis_client.info()

                # Check memory usage
                used_memory = info.get('used_memory', 0)
                max_memory = info.get('maxmemory', 0)
                if max_memory > 0:
                    memory_usage_percent = (used_memory / max_memory) * 100
                    if memory_usage_percent > 90:
                        self.add_error(f"Redis memory usage critical: {memory_usage_percent:.1f}%")
                    elif memory_usage_percent > 80:
                        logger.warning(f"Redis memory usage high: {memory_usage_percent:.1f}%")
            except ImportError:
                # django-redis not available, skip additional checks
                pass
            except Exception as e:
                logger.warning(f"Could not get Redis info: {e}")
        except Exception as e:
            self.add_error(f"Cache service unavailable: {e}")


class DatabasePerformanceCheck(BaseHealthCheckBackend):
    """Check database performance and connectivity"""

    critical_service = False

    def check_status(self):
        try:
            start_time = time.time()

            # Test basic connectivity
            with connection.cursor() as cursor:
                cursor.execute("SELECT 1")
                result = cursor.fetchone()
                if result[0] != 1:
                    self.add_error("Database connectivity test failed")
                    return
            basic_query_time = time.time() - start_time

            # Test a more complex query (if it takes too long, there might be performance issues)
            start_time = time.time()
            with connection.cursor() as cursor:
                cursor.execute("SELECT COUNT(*) FROM django_content_type")
                cursor.fetchone()
            complex_query_time = time.time() - start_time

            # Performance thresholds
            if basic_query_time > 1.0:
                self.add_error(f"Database responding slowly: basic query took {basic_query_time:.2f}s")
            elif basic_query_time > 0.5:
                logger.warning(f"Database performance degraded: basic query took {basic_query_time:.2f}s")

            if complex_query_time > 2.0:
                self.add_error(f"Database performance critical: complex query took {complex_query_time:.2f}s")
            elif complex_query_time > 1.0:
                logger.warning(f"Database performance slow: complex query took {complex_query_time:.2f}s")

            # Check database version and settings if possible
            try:
                with connection.cursor() as cursor:
                    cursor.execute("SELECT version()")
                    version = cursor.fetchone()[0]
                    logger.debug(f"Database version: {version}")
            except Exception as e:
                logger.debug(f"Could not get database version: {e}")
        except Exception as e:
            self.add_error(f"Database performance check failed: {e}")


class ApplicationHealthCheck(BaseHealthCheckBackend):
    """Check application-specific health indicators"""

    critical_service = False

    def check_status(self):
        try:
            # Check if we can import critical modules
            critical_modules = [
                'parks.models',
                'rides.models',
                'accounts.models',
                'core.services',
            ]
            for module_name in critical_modules:
                try:
                    __import__(module_name)
                except ImportError as e:
                    self.add_error(f"Critical module import failed: {module_name} - {e}")

            # Check if we can access critical models
            try:
                from parks.models import Park
                from rides.models import Ride
                from django.contrib.auth import get_user_model

                User = get_user_model()

                # Test that we can query these models (just count, don't load data)
                park_count = Park.objects.count()
                ride_count = Ride.objects.count()
                user_count = User.objects.count()
                logger.debug(f"Model counts - Parks: {park_count}, Rides: {ride_count}, Users: {user_count}")
            except Exception as e:
                self.add_error(f"Model access check failed: {e}")

            # Check media and static file configuration
            from django.conf import settings
            import os

            if not os.path.exists(settings.MEDIA_ROOT):
                self.add_error(f"Media directory does not exist: {settings.MEDIA_ROOT}")
            if not os.path.exists(settings.STATIC_ROOT) and not settings.DEBUG:
                self.add_error(f"Static directory does not exist: {settings.STATIC_ROOT}")
        except Exception as e:
            self.add_error(f"Application health check failed: {e}")


class ExternalServiceHealthCheck(BaseHealthCheckBackend):
    """Check external services and dependencies"""

    critical_service = False

    def check_status(self):
        # Check email service if configured
        try:
            from django.core.mail import get_connection
            from django.conf import settings

            if hasattr(settings, 'EMAIL_BACKEND') and 'console' not in settings.EMAIL_BACKEND:
                # Only check if not using the console backend
                connection = get_connection()
                if hasattr(connection, 'open'):
                    try:
                        connection.open()
                        connection.close()
                    except Exception as e:
                        logger.warning(f"Email service check failed: {e}")
                        # Don't fail the health check for email issues in development
        except Exception as e:
            logger.debug(f"Email service check error: {e}")

        # Check if Sentry is configured and working
        try:
            import sentry_sdk

            if sentry_sdk.Hub.current.client:
                # Sentry is configured
                try:
                    # Test that the SDK works (this won't actually send anything to Sentry)
                    with sentry_sdk.push_scope() as scope:
                        scope.set_tag("health_check", True)
                        # Don't actually send a message, just verify the SDK is working
                        logger.debug("Sentry SDK is operational")
                except Exception as e:
                    logger.warning(f"Sentry SDK check failed: {e}")
        except ImportError:
            logger.debug("Sentry SDK not installed")
        except Exception as e:
            logger.debug(f"Sentry check error: {e}")

        # Check Redis connection if configured
        try:
            from django.core.cache import caches
            from django.conf import settings

            cache_config = settings.CACHES.get('default', {})
            if 'redis' in cache_config.get('BACKEND', '').lower():
                # Redis is configured, test basic connectivity
                redis_cache = caches['default']
                redis_cache.set('health_check_redis', 'test', 10)
                value = redis_cache.get('health_check_redis')
                if value != 'test':
                    self.add_error("Redis cache connectivity test failed")
                else:
                    redis_cache.delete('health_check_redis')
        except Exception as e:
            logger.warning(f"Redis connectivity check failed: {e}")


class DiskSpaceHealthCheck(BaseHealthCheckBackend):
    """Check available disk space"""

    critical_service = False

    def check_status(self):
        try:
            import shutil
            from pathlib import Path

            from django.conf import settings

            # Check disk space for media directory
            media_usage = shutil.disk_usage(settings.MEDIA_ROOT)
            media_free_percent = (media_usage.free / media_usage.total) * 100

            # Check disk space for logs directory if it exists
            # Coerce to Path so a string BASE_DIR (or the '/tmp' fallback) still supports the / operator
            logs_dir = Path(getattr(settings, 'BASE_DIR', '/tmp')) / 'logs'
            if logs_dir.exists():
                logs_usage = shutil.disk_usage(logs_dir)
                logs_free_percent = (logs_usage.free / logs_usage.total) * 100
            else:
                logs_free_percent = media_free_percent  # Use same as media

            # Alert thresholds
            if media_free_percent < 10:
                self.add_error(f"Critical disk space: {media_free_percent:.1f}% free in media directory")
            elif media_free_percent < 20:
                logger.warning(f"Low disk space: {media_free_percent:.1f}% free in media directory")

            if logs_free_percent < 10:
                self.add_error(f"Critical disk space: {logs_free_percent:.1f}% free in logs directory")
            elif logs_free_percent < 20:
                logger.warning(f"Low disk space: {logs_free_percent:.1f}% free in logs directory")
        except Exception as e:
            logger.warning(f"Disk space check failed: {e}")
            # Don't fail health check for disk space issues in development
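
For context, custom backends like these only show up on the health endpoint once they are registered with django-health-check's plugin directory, typically in an AppConfig.ready(). A minimal registration sketch follows; the app label, module path, and config class name are assumptions, not part of this commit:

# core/apps.py - hypothetical registration sketch for the backends above.
from django.apps import AppConfig


class CoreConfig(AppConfig):
    name = "core"

    def ready(self):
        from health_check.plugins import plugin_dir

        from .health_checks import (
            ApplicationHealthCheck,
            CacheHealthCheck,
            DatabasePerformanceCheck,
            DiskSpaceHealthCheck,
            ExternalServiceHealthCheck,
        )

        # Once registered, each backend is reported wherever health_check.urls
        # is mounted (commonly /ht/).
        plugin_dir.register(CacheHealthCheck)
        plugin_dir.register(DatabasePerformanceCheck)
        plugin_dir.register(ApplicationHealthCheck)
        plugin_dir.register(ExternalServiceHealthCheck)
        plugin_dir.register(DiskSpaceHealthCheck)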