Mirror of https://github.com/pacnpal/thrillwiki_django_no_react.git (synced 2025-12-23 11:11:10 -05:00)
Add comprehensive tests for Parks API and models
- Implemented extensive test cases for the Parks API, covering endpoints for listing, retrieving, creating, updating, and deleting parks.
- Added tests for filtering, searching, and ordering parks in the API.
- Created tests for error handling in the API, including malformed JSON and unsupported methods.
- Developed model tests for Park, ParkArea, Company, and ParkReview models, ensuring validation and constraints are enforced.
- Introduced utility mixins for API and model testing to streamline assertions and enhance test readability.
- Included integration tests to validate complete workflows involving park creation, retrieval, updating, and deletion.
core/services/enhanced_cache_service.py (new file, 254 lines)
@@ -0,0 +1,254 @@
"""
Enhanced caching service with multiple cache backends and strategies.
"""

from typing import Optional, Any, Dict, List, Callable
from django.core.cache import caches
from django.core.cache.utils import make_template_fragment_key
from django.conf import settings
import hashlib
import json
import logging
import time
from functools import wraps

logger = logging.getLogger(__name__)


# Define GeoBounds for type hinting
class GeoBounds:
    def __init__(self, min_lat: float, min_lng: float, max_lat: float, max_lng: float):
        self.min_lat = min_lat
        self.min_lng = min_lng
        self.max_lat = max_lat
        self.max_lng = max_lng


class EnhancedCacheService:
    """Comprehensive caching service with multiple cache backends"""

    def __init__(self):
        self.default_cache = caches['default']
        try:
            self.api_cache = caches['api']
        except Exception:
            # Fall back to the default cache if the 'api' cache is not configured
            self.api_cache = self.default_cache

    # L1: Query-level caching
    def cache_queryset(self, cache_key: str, queryset_func: Callable, timeout: int = 3600, **kwargs) -> Any:
        """Cache expensive querysets"""
        cached_result = self.default_cache.get(cache_key)
        if cached_result is None:
            start_time = time.time()
            result = queryset_func(**kwargs)
            duration = time.time() - start_time

            # Log cache miss and function execution time
            logger.info(
                f"Cache miss for key '{cache_key}', executed in {duration:.3f}s",
                extra={'cache_key': cache_key, 'execution_time': duration}
            )

            self.default_cache.set(cache_key, result, timeout)
            return result

        logger.debug(f"Cache hit for key '{cache_key}'")
        return cached_result

    # L2: API response caching
    def cache_api_response(self, view_name: str, params: Dict, response_data: Any, timeout: int = 1800):
        """Cache API responses based on view and parameters"""
        cache_key = self._generate_api_cache_key(view_name, params)
        self.api_cache.set(cache_key, response_data, timeout)
        logger.debug(f"Cached API response for view '{view_name}'")

    def get_cached_api_response(self, view_name: str, params: Dict) -> Optional[Any]:
        """Retrieve cached API response"""
        cache_key = self._generate_api_cache_key(view_name, params)
        result = self.api_cache.get(cache_key)

        if result:
            logger.debug(f"Cache hit for API view '{view_name}'")
        else:
            logger.debug(f"Cache miss for API view '{view_name}'")

        return result

    # L3: Geographic caching (building on existing MapCacheService)
    def cache_geographic_data(self, bounds: 'GeoBounds', data: Any, zoom_level: int, timeout: int = 1800):
        """Cache geographic data with spatial keys"""
        # Generate spatial cache key based on bounds and zoom level
        cache_key = f"geo:{bounds.min_lat}:{bounds.min_lng}:{bounds.max_lat}:{bounds.max_lng}:z{zoom_level}"
        self.default_cache.set(cache_key, data, timeout)
        logger.debug(f"Cached geographic data for bounds {bounds}")

    def get_cached_geographic_data(self, bounds: 'GeoBounds', zoom_level: int) -> Optional[Any]:
        """Retrieve cached geographic data"""
        cache_key = f"geo:{bounds.min_lat}:{bounds.min_lng}:{bounds.max_lat}:{bounds.max_lng}:z{zoom_level}"
        return self.default_cache.get(cache_key)

    # Cache invalidation utilities
    def invalidate_pattern(self, pattern: str):
        """Invalidate cache keys matching a pattern (if the backend supports it)"""
        try:
            # For Redis cache backends
            if hasattr(self.default_cache, 'delete_pattern'):
                deleted_count = self.default_cache.delete_pattern(pattern)
                logger.info(f"Invalidated {deleted_count} cache keys matching pattern '{pattern}'")
                return deleted_count
            else:
                logger.warning(f"Cache backend does not support pattern deletion for pattern '{pattern}'")
        except Exception as e:
            logger.error(f"Error invalidating cache pattern '{pattern}': {e}")

    def invalidate_model_cache(self, model_name: str, instance_id: Optional[int] = None):
        """Invalidate cache keys related to a specific model"""
        if instance_id:
            pattern = f"*{model_name}:{instance_id}*"
        else:
            pattern = f"*{model_name}*"

        self.invalidate_pattern(pattern)

    # Cache warming utilities
    def warm_cache(self, cache_key: str, warm_func: Callable, timeout: int = 3600, **kwargs):
        """Proactively warm the cache with data"""
        try:
            data = warm_func(**kwargs)
            self.default_cache.set(cache_key, data, timeout)
            logger.info(f"Warmed cache for key '{cache_key}'")
        except Exception as e:
            logger.error(f"Error warming cache for key '{cache_key}': {e}")

    def _generate_api_cache_key(self, view_name: str, params: Dict) -> str:
        """Generate consistent cache keys for API responses"""
        # Sort params to ensure consistent key generation
        params_str = json.dumps(params, sort_keys=True, default=str)
        params_hash = hashlib.md5(params_str.encode()).hexdigest()
        return f"api:{view_name}:{params_hash}"


# Cache decorators
def cache_api_response(timeout=1800, vary_on=None, key_prefix=''):
    """Decorator for caching API responses"""
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(self, request, *args, **kwargs):
            if request.method != 'GET':
                return view_func(self, request, *args, **kwargs)

            # Generate cache key based on view, user, and parameters
            cache_key_parts = [
                key_prefix or view_func.__name__,
                str(request.user.id) if request.user.is_authenticated else 'anonymous',
                str(hash(frozenset(request.GET.items())))
            ]

            if vary_on:
                for field in vary_on:
                    cache_key_parts.append(str(getattr(request, field, '')))

            cache_key = ':'.join(cache_key_parts)

            # Try to get from cache
            cache_service = EnhancedCacheService()
            cached_response = cache_service.api_cache.get(cache_key)
            if cached_response:
                logger.debug(f"Cache hit for API view {view_func.__name__}")
                return cached_response

            # Execute view and cache result
            response = view_func(self, request, *args, **kwargs)
            if hasattr(response, 'status_code') and response.status_code == 200:
                cache_service.api_cache.set(cache_key, response, timeout)
                logger.debug(f"Cached API response for view {view_func.__name__}")

            return response
        return wrapper
    return decorator
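

# Usage sketch for the decorator above, applied to a method of a class-based
# view. The view class is an assumption; only GET requests that return a 200
# response are cached by the wrapper.
def _example_decorated_view():
    from django.http import JsonResponse

    class ParkListView:  # hypothetical view class
        @cache_api_response(timeout=900, key_prefix='park_list')
        def get(self, request, *args, **kwargs):
            # Stand-in for real serialization of Park objects.
            return JsonResponse({'parks': []})

    return ParkListView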


def cache_queryset_result(cache_key_template: str, timeout: int = 3600):
    """Decorator for caching queryset results"""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Generate cache key from template and arguments
            cache_key = cache_key_template.format(*args, **kwargs)

            cache_service = EnhancedCacheService()
            # Bind the call arguments here; cache_queryset itself only accepts
            # keyword arguments for the callable it is given.
            return cache_service.cache_queryset(
                cache_key, lambda: func(*args, **kwargs), timeout
            )
        return wrapper
    return decorator
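

# Usage sketch for the decorator above. The key template is filled from the
# wrapped function's keyword arguments, so call it with keyword arguments,
# e.g. _example_count_parks(company_id=7) caches under 'company:7:park_count'.
# The function body is an assumed stand-in for a real aggregate query.
@cache_queryset_result('company:{company_id}:park_count', timeout=600)
def _example_count_parks(company_id):
    return {'company_id': company_id, 'park_count': 0}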


# Context manager for cache warming
class CacheWarmer:
    """Context manager for batch cache warming operations"""

    def __init__(self):
        self.cache_service = EnhancedCacheService()
        self.warm_operations = []

    def add(self, cache_key: str, warm_func: Callable, timeout: int = 3600, **kwargs):
        """Add a cache warming operation to the batch"""
        self.warm_operations.append({
            'cache_key': cache_key,
            'warm_func': warm_func,
            'timeout': timeout,
            'kwargs': kwargs
        })

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Execute all cache warming operations"""
        logger.info(f"Warming {len(self.warm_operations)} cache entries")

        for operation in self.warm_operations:
            try:
                # Unpack the stored kwargs so warm_cache forwards them to warm_func
                self.cache_service.warm_cache(
                    operation['cache_key'],
                    operation['warm_func'],
                    operation['timeout'],
                    **operation['kwargs'],
                )
            except Exception as e:
                logger.error(f"Error warming cache for {operation['cache_key']}: {e}")


# Cache statistics and monitoring
class CacheMonitor:
    """Monitor cache performance and statistics"""

    def __init__(self):
        self.cache_service = EnhancedCacheService()

    def get_cache_stats(self) -> Dict[str, Any]:
        """Get cache statistics if available"""
        stats = {}

        try:
            # Redis cache stats
            if hasattr(self.cache_service.default_cache, '_cache'):
                redis_client = self.cache_service.default_cache._cache.get_client()
                info = redis_client.info()
                stats['redis'] = {
                    'used_memory': info.get('used_memory_human'),
                    'connected_clients': info.get('connected_clients'),
                    'total_commands_processed': info.get('total_commands_processed'),
                    'keyspace_hits': info.get('keyspace_hits'),
                    'keyspace_misses': info.get('keyspace_misses'),
                }

                # Calculate hit rate
                hits = info.get('keyspace_hits', 0)
                misses = info.get('keyspace_misses', 0)
                if hits + misses > 0:
                    stats['redis']['hit_rate'] = hits / (hits + misses) * 100
        except Exception as e:
            logger.error(f"Error getting cache stats: {e}")

        return stats

    def log_cache_performance(self):
        """Log cache performance metrics"""
        stats = self.get_cache_stats()
        if stats:
            logger.info("Cache performance statistics", extra=stats)

@@ -39,6 +39,7 @@ class UnifiedMapService:

    def get_map_data(
        self,
        *,
        bounds: Optional[GeoBounds] = None,
        filters: Optional[MapFilters] = None,
        zoom_level: int = DEFAULT_ZOOM_LEVEL,

core/services/performance_monitoring.py (new file, 370 lines)
@@ -0,0 +1,370 @@
"""
Performance monitoring utilities and context managers.
"""

import time
import logging
from contextlib import contextmanager
from functools import wraps
from typing import Optional, Dict, Any, List
from django.db import connection
from django.conf import settings
from django.utils import timezone

logger = logging.getLogger('performance')


@contextmanager
def monitor_performance(operation_name: str, **tags):
    """Context manager for monitoring operation performance"""
    start_time = time.time()
    initial_queries = len(connection.queries)

    # Create performance context
    performance_context = {
        'operation': operation_name,
        'start_time': start_time,
        'timestamp': timezone.now().isoformat(),
        **tags
    }

    try:
        yield performance_context
    except Exception as e:
        performance_context['error'] = str(e)
        performance_context['status'] = 'error'
        raise
    else:
        performance_context['status'] = 'success'
    finally:
        end_time = time.time()
        duration = end_time - start_time
        total_queries = len(connection.queries) - initial_queries

        # Update performance context with final metrics
        performance_context.update({
            'duration_seconds': duration,
            'duration_ms': round(duration * 1000, 2),
            'query_count': total_queries,
            'end_time': end_time,
        })

        # Log performance data
        log_level = logging.WARNING if duration > 2.0 or total_queries > 10 else logging.INFO
        logger.log(
            log_level,
            f"Performance: {operation_name} completed in {duration:.3f}s with {total_queries} queries",
            extra=performance_context
        )

        # Log slow operations with additional detail
        if duration > 2.0:
            logger.warning(
                f"Slow operation detected: {operation_name} took {duration:.3f}s",
                extra={
                    'slow_operation': True,
                    'threshold_exceeded': 'duration',
                    **performance_context
                }
            )

        if total_queries > 10:
            logger.warning(
                f"High query count: {operation_name} executed {total_queries} queries",
                extra={
                    'high_query_count': True,
                    'threshold_exceeded': 'query_count',
                    **performance_context
                }
            )
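

# Usage sketch for the context manager above. The loader callable is an
# assumption; keyword tags become extra fields on the emitted log records.
def _example_monitored_lookup(load_parks_for_region):
    with monitor_performance('park_region_lookup', region='midwest') as ctx:
        parks = load_parks_for_region('midwest')
        ctx['result_count'] = len(parks)
    return parks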


@contextmanager
def track_queries(operation_name: str, warn_threshold: int = 10):
    """Context manager to track database queries for specific operations"""
    if not settings.DEBUG:
        # connection.queries is only populated when DEBUG is True
        yield
        return

    initial_queries = len(connection.queries)
    start_time = time.time()

    try:
        yield
    finally:
        end_time = time.time()
        total_queries = len(connection.queries) - initial_queries
        execution_time = end_time - start_time

        query_details = []
        if hasattr(connection, 'queries') and total_queries > 0:
            recent_queries = connection.queries[-total_queries:]
            query_details = [
                {
                    'sql': query['sql'][:200] + '...' if len(query['sql']) > 200 else query['sql'],
                    'time': float(query['time'])
                }
                for query in recent_queries
            ]

        performance_data = {
            'operation': operation_name,
            'query_count': total_queries,
            'execution_time': execution_time,
            'queries': query_details if settings.DEBUG else []
        }

        if total_queries > warn_threshold or execution_time > 1.0:
            logger.warning(
                f"Performance concern in {operation_name}: "
                f"{total_queries} queries, {execution_time:.2f}s",
                extra=performance_data
            )
        else:
            logger.debug(
                f"Query tracking for {operation_name}: "
                f"{total_queries} queries, {execution_time:.2f}s",
                extra=performance_data
            )
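

# Usage sketch: wrap a serialization helper to flag N+1 query patterns in
# DEBUG builds. `build_park_payload` is a hypothetical helper, not part of this module.
def _example_tracked_serialization(build_park_payload, park_ids):
    with track_queries('serialize_parks', warn_threshold=5):
        return [build_park_payload(pk) for pk in park_ids]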


class PerformanceProfiler:
    """Advanced performance profiling with detailed metrics"""

    def __init__(self, name: str):
        self.name = name
        self.start_time = None
        self.end_time = None
        self.checkpoints = []
        self.initial_queries = 0
        self.memory_usage = {}

    def start(self):
        """Start profiling"""
        self.start_time = time.time()
        self.initial_queries = len(connection.queries)

        # Track memory usage if psutil is available
        try:
            import psutil
            process = psutil.Process()
            self.memory_usage['start'] = process.memory_info().rss
        except ImportError:
            pass

        logger.debug(f"Started profiling: {self.name}")

    def checkpoint(self, name: str):
        """Add a checkpoint"""
        if self.start_time is None:
            logger.warning(f"Checkpoint '{name}' called before profiling started")
            return

        current_time = time.time()
        elapsed = current_time - self.start_time
        queries_since_start = len(connection.queries) - self.initial_queries

        checkpoint = {
            'name': name,
            'timestamp': current_time,
            'elapsed_seconds': elapsed,
            'queries_since_start': queries_since_start,
        }

        # Memory usage if available
        try:
            import psutil
            process = psutil.Process()
            checkpoint['memory_rss'] = process.memory_info().rss
        except ImportError:
            pass

        self.checkpoints.append(checkpoint)
        logger.debug(f"Checkpoint '{name}' at {elapsed:.3f}s")

    def stop(self):
        """Stop profiling and log results"""
        if self.start_time is None:
            logger.warning("Profiling stopped before it was started")
            return

        self.end_time = time.time()
        total_duration = self.end_time - self.start_time
        total_queries = len(connection.queries) - self.initial_queries

        # Final memory usage
        try:
            import psutil
            process = psutil.Process()
            self.memory_usage['end'] = process.memory_info().rss
        except ImportError:
            pass

        # Create detailed profiling report
        report = {
            'profiler_name': self.name,
            'total_duration': total_duration,
            'total_queries': total_queries,
            'checkpoints': self.checkpoints,
            'memory_usage': self.memory_usage,
            'queries_per_second': total_queries / total_duration if total_duration > 0 else 0,
        }

        # Calculate checkpoint intervals
        if len(self.checkpoints) > 1:
            intervals = []
            for i in range(1, len(self.checkpoints)):
                prev = self.checkpoints[i - 1]
                curr = self.checkpoints[i]
                intervals.append({
                    'from': prev['name'],
                    'to': curr['name'],
                    'duration': curr['elapsed_seconds'] - prev['elapsed_seconds'],
                    'queries': curr['queries_since_start'] - prev['queries_since_start'],
                })
            report['checkpoint_intervals'] = intervals

        # Log the complete report
        log_level = logging.WARNING if total_duration > 1.0 else logging.INFO
        logger.log(
            log_level,
            f"Profiling complete: {self.name} took {total_duration:.3f}s with {total_queries} queries",
            extra=report
        )

        return report


@contextmanager
def profile_operation(name: str):
    """Context manager for detailed operation profiling"""
    profiler = PerformanceProfiler(name)
    profiler.start()

    try:
        yield profiler
    finally:
        profiler.stop()
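

# Usage sketch: checkpoints mark the phases of a multi-step operation. The
# loader and serializer callables are assumptions standing in for real service calls.
def _example_profiled_map_build(load_parks, serialize_parks):
    with profile_operation('map_data_build') as profiler:
        parks = load_parks()
        profiler.checkpoint('parks_loaded')
        payload = serialize_parks(parks)
        profiler.checkpoint('payload_serialized')
    return payload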


class DatabaseQueryAnalyzer:
    """Analyze database query patterns and performance"""

    @staticmethod
    def analyze_queries(queries: List[Dict]) -> Dict[str, Any]:
        """Analyze a list of queries for patterns and issues"""
        if not queries:
            return {}

        total_time = sum(float(q.get('time', 0)) for q in queries)
        query_count = len(queries)

        # Group queries by type
        query_types = {}
        for query in queries:
            sql = query.get('sql', '').strip().upper()
            query_type = sql.split()[0] if sql else 'UNKNOWN'
            query_types[query_type] = query_types.get(query_type, 0) + 1

        # Find slow queries (top 10% by time)
        sorted_queries = sorted(queries, key=lambda q: float(q.get('time', 0)), reverse=True)
        slow_query_count = max(1, query_count // 10)
        slow_queries = sorted_queries[:slow_query_count]

        # Detect duplicate queries
        query_signatures = {}
        for query in queries:
            # Simplified signature: whitespace-normalized SQL (literals are not stripped)
            sql = query.get('sql', '')
            signature = ' '.join(sql.split())
            query_signatures[signature] = query_signatures.get(signature, 0) + 1

        duplicates = {sig: count for sig, count in query_signatures.items() if count > 1}

        analysis = {
            'total_queries': query_count,
            'total_time': total_time,
            'average_time': total_time / query_count if query_count > 0 else 0,
            'query_types': query_types,
            'slow_queries': [
                {
                    'sql': q.get('sql', '')[:200] + '...' if len(q.get('sql', '')) > 200 else q.get('sql', ''),
                    'time': float(q.get('time', 0))
                }
                for q in slow_queries
            ],
            'duplicate_query_count': len(duplicates),
            'duplicate_queries': duplicates if len(duplicates) <= 10 else dict(list(duplicates.items())[:10]),
        }

        return analysis

    @classmethod
    def analyze_current_queries(cls) -> Dict[str, Any]:
        """Analyze the current request's queries"""
        if hasattr(connection, 'queries'):
            return cls.analyze_queries(connection.queries)
        return {}
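

# Usage sketch: inspect the current request's queries in a DEBUG environment.
# The 90 ms threshold for flagging individual statements is an arbitrary assumption.
def _example_flag_slow_queries():
    analysis = DatabaseQueryAnalyzer.analyze_current_queries()
    slow = [q for q in analysis.get('slow_queries', []) if q['time'] > 0.09]
    if slow:
        logger.warning(f"{len(slow)} slow queries detected", extra={'slow_queries': slow})
    return analysis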


# Performance monitoring decorators
def monitor_function_performance(operation_name: Optional[str] = None):
    """Decorator to monitor function performance"""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            name = operation_name or f"{func.__module__}.{func.__name__}"
            # 'module' is a reserved LogRecord attribute, so tag the module under a different key
            with monitor_performance(name, function=func.__name__, source_module=func.__module__):
                return func(*args, **kwargs)
        return wrapper
    return decorator


def track_database_queries(warn_threshold: int = 10):
    """Decorator to track database queries for a function"""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            operation_name = f"{func.__module__}.{func.__name__}"
            with track_queries(operation_name, warn_threshold):
                return func(*args, **kwargs)
        return wrapper
    return decorator
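

# Usage sketch: the decorators above can be stacked on a service function.
# `_example_fetch_park_stats` and its park_id parameter are illustrative assumptions.
@monitor_function_performance('park_stats_fetch')
@track_database_queries(warn_threshold=5)
def _example_fetch_park_stats(park_id: int):
    # Stand-in body; a real implementation would query Park-related models.
    return {'park_id': park_id, 'ride_count': 0}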


# Performance metrics collection
class PerformanceMetrics:
    """Collect and aggregate performance metrics"""

    def __init__(self):
        self.metrics = []

    def record_metric(self, name: str, value: float, tags: Optional[Dict] = None):
        """Record a performance metric"""
        metric = {
            'name': name,
            'value': value,
            'timestamp': timezone.now().isoformat(),
            'tags': tags or {}
        }
        self.metrics.append(metric)

        # Log the metric (nested under 'metric' so keys like 'name' do not
        # collide with reserved LogRecord attributes)
        logger.info(
            f"Performance metric: {name} = {value}",
            extra={'metric': metric}
        )

    def get_metrics(self, name: Optional[str] = None) -> List[Dict]:
        """Get recorded metrics, optionally filtered by name"""
        if name:
            return [m for m in self.metrics if m['name'] == name]
        return self.metrics.copy()

    def clear_metrics(self):
        """Clear all recorded metrics"""
        self.metrics.clear()


# Global performance metrics instance
performance_metrics = PerformanceMetrics()
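

# Usage sketch: record a gauge-style metric through the global instance above.
# The metric name and tag values are illustrative assumptions.
def _example_record_cache_hit_rate(hit_rate: float):
    performance_metrics.record_metric('cache.hit_rate', hit_rate, tags={'backend': 'redis'})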