Improve park listing performance with optimized queries and caching

Improve park listing performance by optimizing database queries, introducing caching for expensive lookups, and refining pagination so listing pages load faster.
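
The query, caching, and pagination changes live in the other changed files. As a rough sketch of the general approach only (not the actual code from this commit: the `Park` model, its `location` and `amenities` relations, and the five-minute cache timeout are assumptions), a cached listing page might be served along these lines:

```python
# Illustrative sketch only: model name, relation names, and cache timeout
# are assumptions, not code taken from this commit.
from django.core.cache import cache
from django.core.paginator import Paginator

from apps.parks.models import Park  # assumed import path


def cached_park_page(page_number: int, page_size: int = 20):
    """Return one page of parks without N+1 queries, cached briefly."""
    cache_key = f"park_listing:{page_number}:{page_size}"

    def build_page():
        queryset = (
            Park.objects
            .select_related("location")      # join the FK in the same query
            .prefetch_related("amenities")   # batch the M2M lookup
            .order_by("name")
        )
        page = Paginator(queryset, page_size).get_page(page_number)
        return list(page)  # materialise so the result can be cached

    # get_or_set computes and stores the page only on a cache miss
    return cache.get_or_set(cache_key, build_page, timeout=300)
```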

Replit-Commit-Author: Agent
Replit-Commit-Session-Id: c446bc9e-66df-438c-a86c-f53e6da13649
Replit-Commit-Checkpoint-Type: intermediate_checkpoint
This commit is contained in:
pac7
2025-09-23 22:50:09 +00:00
parent 6391b3d81c
commit fff180c476
11 changed files with 2864 additions and 70 deletions

@@ -0,0 +1,198 @@
"""
Django management command to run performance benchmarks.
"""
from django.core.management.base import BaseCommand
from django.utils import timezone
import json
import time
class Command(BaseCommand):
help = 'Run comprehensive performance benchmarks for park listing features'
def add_arguments(self, parser):
parser.add_argument(
'--save',
action='store_true',
help='Save detailed benchmark results to file',
)
parser.add_argument(
'--autocomplete-only',
action='store_true',
help='Run only autocomplete benchmarks',
)
parser.add_argument(
'--listing-only',
action='store_true',
help='Run only listing benchmarks',
)
parser.add_argument(
'--pagination-only',
action='store_true',
help='Run only pagination benchmarks',
)
parser.add_argument(
'--iterations',
type=int,
default=1,
help='Number of iterations to run (default: 1)',
)
    def handle(self, *args, **options):
        """Run the requested benchmarks and report the results."""
        # Local import: the benchmark suite is only needed when the command runs.
        from apps.parks.services.performance_monitoring import BenchmarkSuite

        self.stdout.write(
            self.style.SUCCESS('Starting Park Listing Performance Benchmarks')
        )

        suite = BenchmarkSuite()
        iterations = options['iterations']
        all_results = []

        for i in range(iterations):
            if iterations > 1:
                self.stdout.write(f'\nIteration {i + 1}/{iterations}')

            start_time = time.perf_counter()

            # Run specific benchmarks or the full suite
            if options['autocomplete_only']:
                result = suite.run_autocomplete_benchmark()
            elif options['listing_only']:
                result = suite.run_listing_benchmark()
            elif options['pagination_only']:
                result = suite.run_pagination_benchmark()
            else:
                result = suite.run_full_benchmark_suite()

            duration = time.perf_counter() - start_time
            result['iteration'] = i + 1
            result['benchmark_duration'] = duration
            all_results.append(result)

            # Display summary for this iteration
            self._display_iteration_summary(result, duration)

        # Display overall summary if multiple iterations
        if iterations > 1:
            self._display_overall_summary(all_results)

        # Save results if requested
        if options['save']:
            self._save_results(all_results)

        self.stdout.write(
            self.style.SUCCESS('\nBenchmark completed successfully!')
        )
    def _display_iteration_summary(self, result, duration):
        """Display summary for a single iteration."""
        if 'overall_summary' in result:
            summary = result['overall_summary']
            self.stdout.write(f'\nBenchmark Duration: {duration:.3f}s')
            self.stdout.write(f'Total Operations: {summary["total_operations"]}')
            self.stdout.write(f'Average Response Time: {summary["duration_stats"]["mean"]:.3f}s')
            self.stdout.write(f'Average Query Count: {summary["query_stats"]["mean"]:.1f}')
            self.stdout.write(f'Cache Hit Rate: {summary["cache_stats"]["hit_rate"]:.1f}%')

            # Display slowest operations
            if summary.get('slowest_operations'):
                self.stdout.write('\nSlowest Operations:')
                for op in summary['slowest_operations'][:3]:
                    self.stdout.write(f' {op["operation"]}: {op["duration"]:.3f}s ({op["query_count"]} queries)')

        # Display recommendations
        if result.get('recommendations'):
            self.stdout.write('\nRecommendations:')
            for rec in result['recommendations']:
                self.stdout.write(f'{rec}')

        # Display specific benchmark results
        for benchmark_type in ['autocomplete', 'listing', 'pagination']:
            if benchmark_type in result:
                self._display_benchmark_results(benchmark_type, result[benchmark_type])
    def _display_benchmark_results(self, benchmark_type, results):
        """Display results for a specific benchmark type."""
        self.stdout.write(f'\n{benchmark_type.title()} Benchmark Results:')

        if benchmark_type == 'autocomplete':
            for query_result in results.get('results', []):
                self.stdout.write(
                    f' Query "{query_result["query"]}": {query_result["response_time"]:.3f}s '
                    f'({query_result["query_count"]} queries)'
                )
        elif benchmark_type == 'listing':
            for scenario in results.get('results', []):
                self.stdout.write(
                    f' {scenario["scenario"]}: {scenario["response_time"]:.3f}s '
                    f'({scenario["query_count"]} queries, {scenario["result_count"]} results)'
                )
        elif benchmark_type == 'pagination':
            # Group by page size for cleaner display
            by_page_size = {}
            for result in results.get('results', []):
                size = result['page_size']
                if size not in by_page_size:
                    by_page_size[size] = []
                by_page_size[size].append(result)

            for page_size, page_results in by_page_size.items():
                avg_time = sum(r['response_time'] for r in page_results) / len(page_results)
                avg_queries = sum(r['query_count'] for r in page_results) / len(page_results)
                self.stdout.write(
                    f' Page size {page_size}: avg {avg_time:.3f}s ({avg_queries:.1f} queries)'
                )
    def _display_overall_summary(self, all_results):
        """Display summary across all iterations."""
        self.stdout.write('\n' + '=' * 50)
        self.stdout.write('OVERALL SUMMARY ACROSS ALL ITERATIONS')
        self.stdout.write('=' * 50)

        # Calculate averages across iterations
        total_duration = sum(r['benchmark_duration'] for r in all_results)

        # Extract performance metrics from iterations with an overall_summary
        overall_summaries = [r['overall_summary'] for r in all_results if 'overall_summary' in r]

        if overall_summaries:
            avg_response_time = sum(s['duration_stats']['mean'] for s in overall_summaries) / len(overall_summaries)
            avg_query_count = sum(s['query_stats']['mean'] for s in overall_summaries) / len(overall_summaries)
            avg_cache_hit_rate = sum(s['cache_stats']['hit_rate'] for s in overall_summaries) / len(overall_summaries)

            self.stdout.write(f'Total Benchmark Time: {total_duration:.3f}s')
            self.stdout.write(f'Average Response Time: {avg_response_time:.3f}s')
            self.stdout.write(f'Average Query Count: {avg_query_count:.1f}')
            self.stdout.write(f'Average Cache Hit Rate: {avg_cache_hit_rate:.1f}%')
    def _save_results(self, results):
        """Save benchmark results to file."""
        timestamp = timezone.now().strftime('%Y%m%d_%H%M%S')
        filename = f'benchmark_results_{timestamp}.json'

        try:
            import os

            # Ensure logs directory exists
            logs_dir = 'logs'
            os.makedirs(logs_dir, exist_ok=True)

            filepath = os.path.join(logs_dir, filename)
            with open(filepath, 'w') as f:
                json.dump(results, f, indent=2, default=str)

            self.stdout.write(
                self.style.SUCCESS(f'Results saved to {filepath}')
            )
        except Exception as e:
            self.stdout.write(
                self.style.ERROR(f'Error saving results: {e}')
            )
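
As a usage note, the command can be run from the shell or driven from test code via `call_command`. The command name `benchmark_park_listing` below is an assumption, since the new file's path is not shown in this view:

```python
# Hypothetical invocation; the management command's name is assumed here.
from django.core.management import call_command

# Shell equivalent: python manage.py benchmark_park_listing --iterations 3 --save
call_command("benchmark_park_listing", iterations=3, save=True)
```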