Mirror of https://github.com/pacnpal/thrillwiki_django_no_react.git
Synced 2025-12-20 10:11:09 -05:00
feat: Implement Entity Suggestion Manager and Modal components
- Added EntitySuggestionManager.vue to manage entity suggestions and authentication.
- Created EntitySuggestionModal.vue for displaying suggestions and adding new entities.
- Integrated AuthManager for user authentication within the suggestion modal.
- Enhanced signal handling in start-servers.sh for graceful shutdown of servers.
- Improved server startup script to ensure proper cleanup and responsiveness to termination signals.
- Added documentation for signal handling fixes and usage instructions.
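The start-servers.sh change itself is shell-side and not shown in this diff; as a rough Python illustration of the same trap-and-cleanup pattern the message describes (all names below are illustrative, not taken from the repo):

```python
# Illustrative sketch only -- the real fix lives in start-servers.sh.
# Same idea: trap SIGINT/SIGTERM, stop child processes, then exit,
# so one Ctrl+C shuts down every dev server cleanly.
import signal
import subprocess
import sys

procs = []

def shutdown(signum, frame):
    for p in procs:          # forward the signal to each child
        if p.poll() is None:
            p.terminate()
    for p in procs:          # then wait, so nothing is orphaned
        p.wait()
    sys.exit(0)

signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)

# Hypothetical commands standing in for the backend/frontend servers.
procs.append(subprocess.Popen(["python", "manage.py", "runserver"]))
procs.append(subprocess.Popen(["npm", "run", "dev"]))

for p in procs:
    p.wait()
```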
@@ -4,48 +4,44 @@ from django.contrib.sites.models import Site
 
 
 class Command(BaseCommand):
-    help = 'Set up social authentication providers for development'
+    help = "Set up social authentication providers for development"
 
     def handle(self, *args, **options):
         # Get the current site
         site = Site.objects.get_current()
-        self.stdout.write(f'Setting up social providers for site: {site}')
+        self.stdout.write(f"Setting up social providers for site: {site}")
 
         # Clear existing social apps to avoid duplicates
         deleted_count = SocialApp.objects.all().delete()[0]
-        self.stdout.write(f'Cleared {deleted_count} existing social apps')
+        self.stdout.write(f"Cleared {deleted_count} existing social apps")
 
         # Create Google social app
         google_app = SocialApp.objects.create(
-            provider='google',
-            name='Google',
-            client_id='demo-google-client-id.apps.googleusercontent.com',
-            secret='demo-google-client-secret',
-            key='',
+            provider="google",
+            name="Google",
+            client_id="demo-google-client-id.apps.googleusercontent.com",
+            secret="demo-google-client-secret",
+            key="",
         )
         google_app.sites.add(site)
-        self.stdout.write(
-            self.style.SUCCESS('✅ Created Google social app')
-        )
+        self.stdout.write(self.style.SUCCESS("✅ Created Google social app"))
 
         # Create Discord social app
         discord_app = SocialApp.objects.create(
-            provider='discord',
-            name='Discord',
-            client_id='demo-discord-client-id',
-            secret='demo-discord-client-secret',
-            key='',
+            provider="discord",
+            name="Discord",
+            client_id="demo-discord-client-id",
+            secret="demo-discord-client-secret",
+            key="",
        )
         discord_app.sites.add(site)
-        self.stdout.write(
-            self.style.SUCCESS('✅ Created Discord social app')
-        )
+        self.stdout.write(self.style.SUCCESS("✅ Created Discord social app"))
 
         # List all social apps
-        self.stdout.write('\nConfigured social apps:')
+        self.stdout.write("\nConfigured social apps:")
         for app in SocialApp.objects.all():
-            self.stdout.write(f'- {app.name} ({app.provider}): {app.client_id}')
+            self.stdout.write(f"- {app.name} ({app.provider}): {app.client_id}")
 
         self.stdout.write(
-            self.style.SUCCESS(f'\nTotal social apps: {SocialApp.objects.count()}')
+            self.style.SUCCESS(f"\nTotal social apps: {SocialApp.objects.count()}")
         )
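A command like the one above is typically run once per development environment. Hypothetically (the diff does not show this command module's file name, so `setup_social_auth` is a guess for illustration):

```python
# Hypothetical invocation -- the management command's name is not
# visible in this diff, so "setup_social_auth" is assumed.
from django.core.management import call_command

call_command("setup_social_auth")
```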
@@ -17,19 +17,26 @@ class UserSerializer(serializers.ModelSerializer):
     """
     User serializer for API responses
     """
+
     avatar_url = serializers.SerializerMethodField()
 
     class Meta:
         model = User
         fields = [
-            'id', 'username', 'email', 'first_name', 'last_name',
-            'date_joined', 'is_active', 'avatar_url'
+            "id",
+            "username",
+            "email",
+            "first_name",
+            "last_name",
+            "date_joined",
+            "is_active",
+            "avatar_url",
         ]
-        read_only_fields = ['id', 'date_joined', 'is_active']
+        read_only_fields = ["id", "date_joined", "is_active"]
 
     def get_avatar_url(self, obj):
         """Get user avatar URL"""
-        if hasattr(obj, 'profile') and obj.profile.avatar:
+        if hasattr(obj, "profile") and obj.profile.avatar:
             return obj.profile.avatar.url
         return None
 
@@ -38,59 +45,57 @@ class LoginSerializer(serializers.Serializer):
     """
     Serializer for user login
     """
+
     username = serializers.CharField(
-        max_length=254,
-        help_text="Username or email address"
+        max_length=254, help_text="Username or email address"
     )
     password = serializers.CharField(
-        max_length=128,
-        style={'input_type': 'password'},
-        trim_whitespace=False
+        max_length=128, style={"input_type": "password"}, trim_whitespace=False
     )
 
     def validate(self, attrs):
-        username = attrs.get('username')
-        password = attrs.get('password')
+        username = attrs.get("username")
+        password = attrs.get("password")
 
         if username and password:
             return attrs
 
-        raise serializers.ValidationError(
-            'Must include username/email and password.'
-        )
+        raise serializers.ValidationError("Must include username/email and password.")
 
 
 class SignupSerializer(serializers.ModelSerializer):
     """
     Serializer for user registration
     """
+
     password = serializers.CharField(
         write_only=True,
         validators=[validate_password],
-        style={'input_type': 'password'}
+        style={"input_type": "password"},
     )
     password_confirm = serializers.CharField(
-        write_only=True,
-        style={'input_type': 'password'}
+        write_only=True, style={"input_type": "password"}
     )
 
     class Meta:
         model = User
         fields = [
-            'username', 'email', 'first_name', 'last_name',
-            'password', 'password_confirm'
+            "username",
+            "email",
+            "first_name",
+            "last_name",
+            "password",
+            "password_confirm",
         ]
         extra_kwargs = {
-            'password': {'write_only': True},
-            'email': {'required': True},
+            "password": {"write_only": True},
+            "email": {"required": True},
         }
 
     def validate_email(self, value):
         """Validate email is unique"""
         if UserModel.objects.filter(email=value).exists():
-            raise serializers.ValidationError(
-                "A user with this email already exists."
-            )
+            raise serializers.ValidationError("A user with this email already exists.")
         return value
 
     def validate_username(self, value):
@@ -103,24 +108,22 @@ class SignupSerializer(serializers.ModelSerializer):
 
     def validate(self, attrs):
         """Validate passwords match"""
-        password = attrs.get('password')
-        password_confirm = attrs.get('password_confirm')
+        password = attrs.get("password")
+        password_confirm = attrs.get("password_confirm")
 
         if password != password_confirm:
-            raise serializers.ValidationError({
-                'password_confirm': 'Passwords do not match.'
-            })
+            raise serializers.ValidationError(
+                {"password_confirm": "Passwords do not match."}
+            )
 
         return attrs
 
     def create(self, validated_data):
         """Create user with validated data"""
-        validated_data.pop('password_confirm', None)
-        password = validated_data.pop('password')
+        validated_data.pop("password_confirm", None)
+        password = validated_data.pop("password")
 
-        user = UserModel.objects.create(
-            **validated_data
-        )
+        user = UserModel.objects.create(**validated_data)
         user.set_password(password)
         user.save()
 
@@ -131,6 +134,7 @@ class PasswordResetSerializer(serializers.Serializer):
     """
     Serializer for password reset request
     """
+
     email = serializers.EmailField()
 
     def validate_email(self, value):
@@ -145,37 +149,36 @@ class PasswordResetSerializer(serializers.Serializer):
 
     def save(self, **kwargs):
         """Send password reset email if user exists"""
-        if hasattr(self, 'user'):
+        if hasattr(self, "user"):
             # Create password reset token
             token = get_random_string(64)
             PasswordReset.objects.update_or_create(
                 user=self.user,
                 defaults={
-                    'token': token,
-                    'expires_at': timezone.now() + timedelta(hours=24),
-                    'used': False
-                }
+                    "token": token,
+                    "expires_at": timezone.now() + timedelta(hours=24),
+                    "used": False,
+                },
             )
 
             # Send reset email
-            request = self.context.get('request')
+            request = self.context.get("request")
             if request:
                 site = get_current_site(request)
                 reset_url = f"{request.scheme}://{site.domain}/reset-password/{token}/"
 
                 context = {
-                    'user': self.user,
-                    'reset_url': reset_url,
-                    'site_name': site.name,
+                    "user": self.user,
+                    "reset_url": reset_url,
+                    "site_name": site.name,
                 }
 
                 email_html = render_to_string(
-                    'accounts/email/password_reset.html',
-                    context
+                    "accounts/email/password_reset.html", context
                 )
 
                 EmailService.send_email(
-                    to=getattr(self.user, 'email', None),
+                    to=getattr(self.user, "email", None),
                     subject="Reset your password",
                     text=f"Click the link to reset your password: {reset_url}",
                     site=site,
@@ -187,49 +190,45 @@ class PasswordChangeSerializer(serializers.Serializer):
     """
     Serializer for password change
     """
+
     old_password = serializers.CharField(
-        max_length=128,
-        style={'input_type': 'password'}
+        max_length=128, style={"input_type": "password"}
     )
     new_password = serializers.CharField(
-        max_length=128,
-        validators=[validate_password],
-        style={'input_type': 'password'}
+        max_length=128, validators=[validate_password], style={"input_type": "password"}
     )
     new_password_confirm = serializers.CharField(
-        max_length=128,
-        style={'input_type': 'password'}
+        max_length=128, style={"input_type": "password"}
     )
 
     def validate_old_password(self, value):
         """Validate old password is correct"""
-        user = self.context['request'].user
+        user = self.context["request"].user
         if not user.check_password(value):
-            raise serializers.ValidationError(
-                'Old password is incorrect.'
-            )
+            raise serializers.ValidationError("Old password is incorrect.")
         return value
 
     def validate(self, attrs):
         """Validate new passwords match"""
-        new_password = attrs.get('new_password')
-        new_password_confirm = attrs.get('new_password_confirm')
+        new_password = attrs.get("new_password")
+        new_password_confirm = attrs.get("new_password_confirm")
 
         if new_password != new_password_confirm:
-            raise serializers.ValidationError({
-                'new_password_confirm': 'New passwords do not match.'
-            })
+            raise serializers.ValidationError(
+                {"new_password_confirm": "New passwords do not match."}
+            )
 
         return attrs
 
     def save(self, **kwargs):
         """Change user password"""
-        user = self.context['request'].user
-        new_password = self.initial_data.get(
-            'new_password') if self.initial_data else None
+        user = self.context["request"].user
+        new_password = (
+            self.initial_data.get("new_password") if self.initial_data else None
+        )
 
         if new_password is None:
-            raise serializers.ValidationError('New password is required.')
+            raise serializers.ValidationError("New password is required.")
 
         user.set_password(new_password)
         user.save()
 
@@ -241,6 +240,7 @@ class SocialProviderSerializer(serializers.Serializer):
     """
     Serializer for social authentication providers
     """
+
     id = serializers.CharField()
     name = serializers.CharField()
     login_url = serializers.URLField()
backend/apps/api/v1/serializers_rankings.py (new file, 252 lines)
@@ -0,0 +1,252 @@
"""
API serializers for the ride ranking system.
"""

from rest_framework import serializers
from drf_spectacular.utils import extend_schema_serializer, OpenApiExample

from apps.rides.models import RideRanking, RidePairComparison, RankingSnapshot


@extend_schema_serializer(
    examples=[
        OpenApiExample(
            "Ride Ranking Example",
            summary="Example ranking response",
            description="A ride ranking with all metrics",
            value={
                "id": 1,
                "rank": 1,
                "ride": {
                    "id": 123,
                    "name": "Steel Vengeance",
                    "slug": "steel-vengeance",
                    "park": {"id": 45, "name": "Cedar Point", "slug": "cedar-point"},
                    "category": "RC",
                },
                "wins": 523,
                "losses": 87,
                "ties": 45,
                "winning_percentage": 0.8234,
                "mutual_riders_count": 1250,
                "comparison_count": 655,
                "average_rating": 9.2,
                "last_calculated": "2024-01-15T02:00:00Z",
                "rank_change": 2,
                "previous_rank": 3,
            },
        )
    ]
)
class RideRankingSerializer(serializers.ModelSerializer):
    """Serializer for ride rankings."""

    ride = serializers.SerializerMethodField()
    rank_change = serializers.SerializerMethodField()
    previous_rank = serializers.SerializerMethodField()

    class Meta:
        model = RideRanking
        fields = [
            "id",
            "rank",
            "ride",
            "wins",
            "losses",
            "ties",
            "winning_percentage",
            "mutual_riders_count",
            "comparison_count",
            "average_rating",
            "last_calculated",
            "rank_change",
            "previous_rank",
        ]

    def get_ride(self, obj):
        """Get ride details."""
        return {
            "id": obj.ride.id,
            "name": obj.ride.name,
            "slug": obj.ride.slug,
            "park": {
                "id": obj.ride.park.id,
                "name": obj.ride.park.name,
                "slug": obj.ride.park.slug,
            },
            "category": obj.ride.category,
        }

    def get_rank_change(self, obj):
        """Calculate rank change from previous snapshot."""
        latest_snapshots = RankingSnapshot.objects.filter(ride=obj.ride).order_by(
            "-snapshot_date"
        )[:2]

        if len(latest_snapshots) >= 2:
            return latest_snapshots[0].rank - latest_snapshots[1].rank
        return None

    def get_previous_rank(self, obj):
        """Get previous rank."""
        latest_snapshots = RankingSnapshot.objects.filter(ride=obj.ride).order_by(
            "-snapshot_date"
        )[:2]

        if len(latest_snapshots) >= 2:
            return latest_snapshots[1].rank
        return None


class RideRankingDetailSerializer(serializers.ModelSerializer):
    """Detailed serializer for a specific ride's ranking."""

    ride = serializers.SerializerMethodField()
    head_to_head_comparisons = serializers.SerializerMethodField()
    ranking_history = serializers.SerializerMethodField()

    class Meta:
        model = RideRanking
        fields = [
            "id",
            "rank",
            "ride",
            "wins",
            "losses",
            "ties",
            "winning_percentage",
            "mutual_riders_count",
            "comparison_count",
            "average_rating",
            "last_calculated",
            "calculation_version",
            "head_to_head_comparisons",
            "ranking_history",
        ]

    def get_ride(self, obj):
        """Get detailed ride information."""
        ride = obj.ride
        return {
            "id": ride.id,
            "name": ride.name,
            "slug": ride.slug,
            "description": ride.description,
            "park": {
                "id": ride.park.id,
                "name": ride.park.name,
                "slug": ride.park.slug,
                "location": {
                    "city": (
                        ride.park.location.city
                        if hasattr(ride.park, "location")
                        else None
                    ),
                    "state": (
                        ride.park.location.state
                        if hasattr(ride.park, "location")
                        else None
                    ),
                    "country": (
                        ride.park.location.country
                        if hasattr(ride.park, "location")
                        else None
                    ),
                },
            },
            "category": ride.category,
            "manufacturer": (
                {"id": ride.manufacturer.id, "name": ride.manufacturer.name}
                if ride.manufacturer
                else None
            ),
            "opening_date": ride.opening_date,
            "status": ride.status,
        }

    def get_head_to_head_comparisons(self, obj):
        """Get top head-to-head comparisons."""
        from django.db.models import Q

        comparisons = (
            RidePairComparison.objects.filter(Q(ride_a=obj.ride) | Q(ride_b=obj.ride))
            .select_related("ride_a", "ride_b")
            .order_by("-mutual_riders_count")[:10]
        )

        results = []
        for comp in comparisons:
            if comp.ride_a == obj.ride:
                opponent = comp.ride_b
                wins = comp.ride_a_wins
                losses = comp.ride_b_wins
            else:
                opponent = comp.ride_a
                wins = comp.ride_b_wins
                losses = comp.ride_a_wins

            result = "win" if wins > losses else "loss" if losses > wins else "tie"

            results.append(
                {
                    "opponent": {
                        "id": opponent.id,
                        "name": opponent.name,
                        "slug": opponent.slug,
                        "park": opponent.park.name,
                    },
                    "wins": wins,
                    "losses": losses,
                    "ties": comp.ties,
                    "result": result,
                    "mutual_riders": comp.mutual_riders_count,
                }
            )

        return results

    def get_ranking_history(self, obj):
        """Get recent ranking history."""
        history = RankingSnapshot.objects.filter(ride=obj.ride).order_by(
            "-snapshot_date"
        )[:30]

        return [
            {
                "date": snapshot.snapshot_date,
                "rank": snapshot.rank,
                "winning_percentage": float(snapshot.winning_percentage),
            }
            for snapshot in history
        ]


class RankingSnapshotSerializer(serializers.ModelSerializer):
    """Serializer for ranking history snapshots."""

    ride_name = serializers.CharField(source="ride.name", read_only=True)
    park_name = serializers.CharField(source="ride.park.name", read_only=True)

    class Meta:
        model = RankingSnapshot
        fields = [
            "id",
            "ride",
            "ride_name",
            "park_name",
            "rank",
            "winning_percentage",
            "snapshot_date",
        ]


class RankingStatsSerializer(serializers.Serializer):
    """Serializer for ranking system statistics."""

    total_ranked_rides = serializers.IntegerField()
    total_comparisons = serializers.IntegerField()
    last_calculation_time = serializers.DateTimeField()
    calculation_duration = serializers.FloatField()
    top_rated_ride = serializers.DictField()
    most_compared_ride = serializers.DictField()
    biggest_rank_change = serializers.DictField()
@@ -44,8 +44,14 @@ from .viewsets import (
     UserProfileViewSet,
     TopListViewSet,
     TopListItemViewSet,
+    # Trending system views
+    TrendingAPIView,
+    NewContentAPIView,
 )
 
+# Import ranking viewsets
+from .viewsets_rankings import RideRankingViewSet, TriggerRankingCalculationView
+
 # Create the main API router
 router = DefaultRouter()
 
@@ -53,7 +59,7 @@ router = DefaultRouter()
 
 # Core models
 router.register(r"parks", ParkViewSet, basename="park")
-router.register(r"rides", RideViewSet, basename="ride")
+# Note: rides registered below with list-only actions to enforce nested-only detail access
 
 # Park-related models
 router.register(r"park-areas", ParkAreaViewSet, basename="park-area")
@@ -79,6 +85,9 @@ router.register(r"top-list-items", TopListItemViewSet, basename="top-list-item")
 router.register(r"ref/parks", ParkReadOnlyViewSet, basename="park-ref")
 router.register(r"ref/rides", RideReadOnlyViewSet, basename="ride-ref")
 
+# Register ranking endpoints
+router.register(r"rankings", RideRankingViewSet, basename="ranking")
+
 app_name = "api_v1"
 
 urlpatterns = [
@@ -137,6 +146,39 @@ urlpatterns = [
         RideHistoryViewSet.as_view({"get": "retrieve"}),
         name="ride-history-detail",
     ),
+    # Nested park-scoped ride endpoints
+    path(
+        "parks/<str:park_slug>/rides/",
+        RideViewSet.as_view({"get": "list", "post": "create"}),
+        name="park-rides-list",
+    ),
+    path(
+        "parks/<str:park_slug>/rides/<str:ride_slug>/",
+        RideViewSet.as_view(
+            {
+                "get": "retrieve",
+                "put": "update",
+                "patch": "partial_update",
+                "delete": "destroy",
+            }
+        ),
+        name="park-rides-detail",
+    ),
+    # Trending system endpoints
+    path("trending/content/", TrendingAPIView.as_view(), name="trending"),
+    path("trending/new/", NewContentAPIView.as_view(), name="new-content"),
+    # Ranking system endpoints
+    path(
+        "rankings/calculate/",
+        TriggerRankingCalculationView.as_view(),
+        name="trigger-ranking-calculation",
+    ),
+    # Global rides list endpoint (detail access only via nested park routes)
+    path(
+        "rides/",
+        RideViewSet.as_view({"get": "list"}),
+        name="ride-list",
+    ),
     # Include all router-generated URLs
     path("", include(router.urls)),
 ]
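Concretely, the nested registration above yields request shapes like the following (the `/api/v1/` mount point is an assumption inferred from `app_name = "api_v1"`; slugs reuse this document's own examples):

```python
# Assumed URL prefix "/api/v1/"; slugs illustrative.
# GET  /api/v1/parks/cedar-point/rides/                  -> park-scoped list
# POST /api/v1/parks/cedar-point/rides/                  -> create within park
# GET  /api/v1/parks/cedar-point/rides/steel-vengeance/  -> nested detail
# GET  /api/v1/rides/                                    -> global list only;
#      detail access is deliberately nested-only, per the comments above.
```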
@@ -28,6 +28,7 @@ from django.core.exceptions import ValidationError
 from django.utils import timezone
 from django.conf import settings
 from django.shortcuts import get_object_or_404
+from django.http import Http404
 from allauth.socialaccount.models import SocialApp
 from allauth.socialaccount import providers
 from health_check.views import MainView
@@ -669,12 +670,20 @@ class RideViewSet(ModelViewSet):
     def get_queryset(self):  # type: ignore[override]
         """Get optimized queryset based on action."""
         if self.action == "list":
-            # Parse filter parameters for list view
+            # CRITICAL FIX: Check if this is a nested endpoint first
+            park_slug = self.kwargs.get("park_slug")
+            if park_slug:
+                # For nested endpoints, use the dedicated park selector
+                from apps.rides.selectors import rides_in_park
+                return rides_in_park(park_slug=park_slug)
+
+            # For global endpoints, parse filter parameters and use general selector
             filter_serializer = RideFilterInputSerializer(
                 data=self.request.query_params  # type: ignore[attr-defined]
             )
             filter_serializer.is_valid(raise_exception=True)
             filters = filter_serializer.validated_data
 
             return ride_list_for_display(filters=filters)  # type: ignore[arg-type]
 
         # For other actions, return base queryset
@@ -690,7 +699,10 @@ class RideViewSet(ModelViewSet):
         ride_slug = self.kwargs.get("slug") or self.kwargs.get("ride_slug")
 
         if park_slug and ride_slug:
-            return ride_detail_optimized(slug=ride_slug, park_slug=park_slug)
+            try:
+                return ride_detail_optimized(slug=ride_slug, park_slug=park_slug)
+            except Ride.DoesNotExist:
+                raise Http404("Ride not found")
         elif ride_slug:
             # For rides accessed directly by slug, we'll use the first approach
             # and let the 404 handling work naturally
@@ -1748,21 +1760,43 @@ class LoginAPIView(TurnstileMixin, APIView):
         email_or_username = serializer.validated_data["username"]
         password = serializer.validated_data["password"]  # type: ignore[index]
 
-        # Try to authenticate with email first, then username
+        # Optimized user lookup: single query using Q objects
+        from django.db.models import Q
+        from django.contrib.auth import get_user_model
+
+        User = get_user_model()
         user = None
-        if "@" in email_or_username:
-            try:
-                user_obj = UserModel.objects.get(email=email_or_username)
+
+        # Single query to find user by email OR username
+        try:
+            if "@" in email_or_username:
+                # Email-like input: try email first, then username as fallback
+                user_obj = (
+                    User.objects.select_related()
+                    .filter(
+                        Q(email=email_or_username) | Q(username=email_or_username)
+                    )
+                    .first()
+                )
+            else:
+                # Username-like input: try username first, then email as fallback
+                user_obj = (
+                    User.objects.select_related()
+                    .filter(
+                        Q(username=email_or_username) | Q(email=email_or_username)
+                    )
+                    .first()
+                )
 
+            if user_obj:
                 user = authenticate(
                     # type: ignore[attr-defined]
                     request._request,
                     username=user_obj.username,
                     password=password,
                 )
-            except UserModel.DoesNotExist:
-                pass
-
-        if not user:
+        except Exception:
+            # Fallback to original behavior
             user = authenticate(
                 # type: ignore[attr-defined]
                 request._request,
@@ -1773,6 +1807,7 @@ class LoginAPIView(TurnstileMixin, APIView):
         if user:
             if user.is_active:
                 login(request._request, user)  # type: ignore[attr-defined]
+                # Optimized token creation - get_or_create is atomic
                 token, created = Token.objects.get_or_create(user=user)
 
                 response_serializer = LoginOutputSerializer(
@@ -1981,48 +2016,56 @@ class SocialProvidersAPIView(APIView):
     serializer_class = SocialProviderOutputSerializer
 
     def get(self, request: Request) -> Response:
+        from django.core.cache import cache
         from django.contrib.sites.shortcuts import get_current_site
 
         site = get_current_site(request._request)  # type: ignore[attr-defined]
 
+        # Cache key based on site and request host
+        cache_key = (
+            f"social_providers:{getattr(site, 'id', site.pk)}:{request.get_host()}"
+        )
+
+        # Try to get from cache first (cache for 15 minutes)
+        cached_providers = cache.get(cache_key)
+        if cached_providers is not None:
+            return Response(cached_providers)
+
         providers_list = []
 
-        # Get all configured social apps for the current site
-        social_apps = SocialApp.objects.filter(sites=site)
+        # Optimized query: filter by site and order by provider name
+        social_apps = SocialApp.objects.filter(sites=site).order_by("provider")
 
         for social_app in social_apps:
-            try:
-                # Get provider class from providers module
-                provider_module = getattr(providers, social_app.provider, None)
-                if provider_module and hasattr(provider_module, "provider"):
-                    provider_class = provider_module.provider
-                    provider_instance = provider_class(request)
+            # Simplified provider name resolution - avoid expensive provider class loading
+            provider_name = social_app.name or social_app.provider.title()
 
-                    auth_url = request.build_absolute_uri(
-                        f"/accounts/{social_app.provider}/login/"
-                    )
-                    providers_list.append(
-                        {
-                            "id": social_app.provider,
-                            "name": provider_instance.name,
-                            "authUrl": auth_url,
-                        }
-                    )
-                else:
-                    # Fallback: use provider id as name
-                    auth_url = request.build_absolute_uri(
-                        f"/accounts/{social_app.provider}/login/"
-                    )
-                    providers_list.append(
-                        {
-                            "id": social_app.provider,
-                            "name": social_app.provider.title(),
-                            "authUrl": auth_url,
-                        }
-                    )
-            except Exception:
-                # Skip if provider can't be loaded
-                continue
+            # Build auth URL efficiently
+            auth_url = request.build_absolute_uri(
+                f"/accounts/{social_app.provider}/login/"
+            )
+
+            providers_list.append(
+                {
+                    "id": social_app.provider,
+                    "name": provider_name,
+                    "authUrl": auth_url,
+                }
+            )
 
+        # Serialize and cache the result
         serializer = SocialProviderOutputSerializer(providers_list, many=True)
-        return Response(serializer.data)
+        response_data = serializer.data
+
+        # Cache for 15 minutes (900 seconds)
+        cache.set(cache_key, response_data, 900)
+
+        return Response(response_data)
 
 
 @extend_schema_view(
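One consequence of the 15-minute cache above: edits to SocialApp rows stay invisible until the key expires. A hedged sketch of an invalidation hook (not in this commit; `delete_pattern` exists only on the django-redis backend):

```python
# Hypothetical invalidation hook -- not part of this diff.
from allauth.socialaccount.models import SocialApp
from django.core.cache import cache
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver


@receiver([post_save, post_delete], sender=SocialApp)
def drop_social_provider_cache(sender, instance, **kwargs):
    # The view's key embeds the request host, which isn't known here,
    # so a wildcard delete is used; delete_pattern is django-redis-only.
    for site in instance.sites.all():
        cache.delete_pattern(f"social_providers:{site.id}:*")
```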
@@ -2908,3 +2951,192 @@ class UnifiedHistoryViewSet(ReadOnlyModelViewSet):
 
         serializer = UnifiedHistoryTimelineSerializer(timeline_data)
         return Response(serializer.data)
+
+
+# === TRENDING VIEWSETS ===
+
+
+@extend_schema_view(
+    list=extend_schema(
+        summary="Get trending content",
+        description="Retrieve trending parks and rides based on view counts, ratings, and recency.",
+        parameters=[
+            OpenApiParameter(
+                name="limit",
+                type=OpenApiTypes.INT,
+                location=OpenApiParameter.QUERY,
+                description="Number of trending items to return (default: 20, max: 100)",
+            ),
+            OpenApiParameter(
+                name="timeframe",
+                type=OpenApiTypes.STR,
+                location=OpenApiParameter.QUERY,
+                description="Timeframe for trending calculation (day, week, month) - default: week",
+            ),
+        ],
+        responses={200: OpenApiTypes.OBJECT},
+        tags=["Trending"],
+    ),
+)
+class TrendingAPIView(APIView):
+    """API endpoint for trending content."""
+
+    permission_classes = [AllowAny]
+
+    def get(self, request: Request) -> Response:
+        """Get trending parks and rides."""
+        from apps.core.services.trending_service import TrendingService
+
+        # Parse parameters
+        limit = min(int(request.query_params.get("limit", 20)), 100)
+
+        # Get trending content
+        trending_service = TrendingService()
+        all_trending = trending_service.get_trending_content(limit=limit * 2)
+
+        # Separate by content type
+        trending_rides = []
+        trending_parks = []
+
+        for item in all_trending:
+            if item.get("category") == "ride":
+                trending_rides.append(item)
+            elif item.get("category") == "park":
+                trending_parks.append(item)
+
+        # Limit each category
+        trending_rides = trending_rides[: limit // 3] if trending_rides else []
+        trending_parks = trending_parks[: limit // 3] if trending_parks else []
+
+        # Create mock latest reviews (since not implemented yet)
+        latest_reviews = [
+            {
+                "id": 1,
+                "name": "Steel Vengeance Review",
+                "location": "Cedar Point",
+                "category": "Roller Coaster",
+                "rating": 5.0,
+                "rank": 1,
+                "views": 1234,
+                "views_change": "+45%",
+                "slug": "steel-vengeance-review",
+            }
+        ][: limit // 3]
+
+        # Return in expected frontend format
+        response_data = {
+            "trending_rides": trending_rides,
+            "trending_parks": trending_parks,
+            "latest_reviews": latest_reviews,
+        }
+
+        return Response(response_data)
+
+
+@extend_schema_view(
+    list=extend_schema(
+        summary="Get new content",
+        description="Retrieve recently added parks and rides.",
+        parameters=[
+            OpenApiParameter(
+                name="limit",
+                type=OpenApiTypes.INT,
+                location=OpenApiParameter.QUERY,
+                description="Number of new items to return (default: 20, max: 100)",
+            ),
+            OpenApiParameter(
+                name="days",
+                type=OpenApiTypes.INT,
+                location=OpenApiParameter.QUERY,
+                description="Number of days to look back for new content (default: 30, max: 365)",
+            ),
+        ],
+        responses={200: OpenApiTypes.OBJECT},
+        tags=["Trending"],
+    ),
+)
+class NewContentAPIView(APIView):
+    """API endpoint for new content."""
+
+    permission_classes = [AllowAny]
+
+    def get(self, request: Request) -> Response:
+        """Get new parks and rides."""
+        from apps.core.services.trending_service import TrendingService
+        from datetime import datetime, date
+
+        # Parse parameters
+        limit = min(int(request.query_params.get("limit", 20)), 100)
+
+        # Get new content with longer timeframe to get more data
+        trending_service = TrendingService()
+        all_new_content = trending_service.get_new_content(
+            limit=limit * 2, days_back=60
+        )
+
+        recently_added = []
+        newly_opened = []
+        upcoming = []
+
+        # Categorize items based on date
+        today = date.today()
+
+        for item in all_new_content:
+            date_added = item.get("date_added", "")
+            if date_added:
+                try:
+                    # Parse the date string
+                    if isinstance(date_added, str):
+                        item_date = datetime.fromisoformat(date_added).date()
+                    else:
+                        item_date = date_added
+
+                    # Calculate days difference
+                    days_diff = (today - item_date).days
+
+                    if days_diff <= 30:  # Recently added (last 30 days)
+                        recently_added.append(item)
+                    elif days_diff <= 365:  # Newly opened (last year)
+                        newly_opened.append(item)
+                    else:  # Older items
+                        newly_opened.append(item)
+
+                except (ValueError, TypeError):
+                    # If date parsing fails, add to recently added
+                    recently_added.append(item)
+            else:
+                recently_added.append(item)
+
+        # Create mock upcoming items
+        upcoming = [
+            {
+                "id": 1,
+                "name": "Epic Universe",
+                "location": "Universal Orlando",
+                "category": "Theme Park",
+                "date_added": "Opening 2025",
+                "slug": "epic-universe",
+            },
+            {
+                "id": 2,
+                "name": "New Fantasyland Expansion",
+                "location": "Magic Kingdom",
+                "category": "Land Expansion",
+                "date_added": "Opening 2026",
+                "slug": "fantasyland-expansion",
+            },
+        ]
+
+        # Limit each category
+        recently_added = recently_added[: limit // 3] if recently_added else []
+        newly_opened = newly_opened[: limit // 3] if newly_opened else []
+        upcoming = upcoming[: limit // 3] if upcoming else []
+
+        # Return in expected frontend format
+        response_data = {
+            "recently_added": recently_added,
+            "newly_opened": newly_opened,
+            "upcoming": upcoming,
+        }
+
+        return Response(response_data)
backend/apps/api/v1/viewsets_rankings.py (new file, 334 lines)
@@ -0,0 +1,334 @@
"""
API viewsets for the ride ranking system.
"""

from django.db.models import Q, Count, Max
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from drf_spectacular.utils import extend_schema, extend_schema_view, OpenApiParameter
from drf_spectacular.types import OpenApiTypes
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.filters import OrderingFilter
from rest_framework.permissions import IsAuthenticatedOrReadOnly, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework.views import APIView

from apps.rides.models import RideRanking, RidePairComparison, RankingSnapshot
from apps.rides.services import RideRankingService
from .serializers_rankings import (
    RideRankingSerializer,
    RideRankingDetailSerializer,
    RankingSnapshotSerializer,
    RankingStatsSerializer,
)


@extend_schema_view(
    list=extend_schema(
        summary="List ride rankings",
        description="Get the current ride rankings calculated using the Internet Roller Coaster Poll algorithm.",
        parameters=[
            OpenApiParameter(
                name="category",
                type=OpenApiTypes.STR,
                location=OpenApiParameter.QUERY,
                description="Filter by ride category (RC, DR, FR, WR, TR, OT)",
                enum=["RC", "DR", "FR", "WR", "TR", "OT"],
            ),
            OpenApiParameter(
                name="min_riders",
                type=OpenApiTypes.INT,
                location=OpenApiParameter.QUERY,
                description="Minimum number of mutual riders required",
            ),
            OpenApiParameter(
                name="park",
                type=OpenApiTypes.STR,
                location=OpenApiParameter.QUERY,
                description="Filter by park slug",
            ),
            OpenApiParameter(
                name="ordering",
                type=OpenApiTypes.STR,
                location=OpenApiParameter.QUERY,
                description="Order results (rank, -rank, winning_percentage, -winning_percentage)",
            ),
        ],
        responses={200: RideRankingSerializer(many=True)},
        tags=["Rankings"],
    ),
    retrieve=extend_schema(
        summary="Get ranking details",
        description="Get detailed ranking information for a specific ride.",
        responses={
            200: RideRankingDetailSerializer,
            404: OpenApiTypes.OBJECT,
        },
        tags=["Rankings"],
    ),
    history=extend_schema(
        summary="Get ranking history",
        description="Get historical ranking data for a specific ride.",
        responses={200: RankingSnapshotSerializer(many=True)},
        tags=["Rankings"],
    ),
    statistics=extend_schema(
        summary="Get ranking statistics",
        description="Get overall statistics about the ranking system.",
        responses={200: RankingStatsSerializer},
        tags=["Rankings", "Statistics"],
    ),
)
class RideRankingViewSet(ReadOnlyModelViewSet):
    """
    ViewSet for ride rankings.

    Provides access to ride rankings calculated using the Internet Roller Coaster Poll algorithm.
    Rankings are updated daily and based on pairwise comparisons of user ratings.
    """

    permission_classes = [AllowAny]
    lookup_field = "ride__slug"
    lookup_url_kwarg = "ride_slug"
    filter_backends = [DjangoFilterBackend, OrderingFilter]
    filterset_fields = ["ride__category"]
    ordering_fields = [
        "rank",
        "winning_percentage",
        "mutual_riders_count",
        "average_rating",
    ]
    ordering = ["rank"]

    def get_queryset(self):
        """Get rankings with optimized queries."""
        queryset = RideRanking.objects.select_related(
            "ride", "ride__park", "ride__park__location", "ride__manufacturer"
        )

        # Filter by category
        category = self.request.query_params.get("category")
        if category:
            queryset = queryset.filter(ride__category=category)

        # Filter by minimum mutual riders
        min_riders = self.request.query_params.get("min_riders")
        if min_riders:
            try:
                queryset = queryset.filter(mutual_riders_count__gte=int(min_riders))
            except ValueError:
                pass

        # Filter by park
        park_slug = self.request.query_params.get("park")
        if park_slug:
            queryset = queryset.filter(ride__park__slug=park_slug)

        return queryset

    def get_serializer_class(self):
        """Use different serializers for list vs detail."""
        if self.action == "retrieve":
            return RideRankingDetailSerializer
        elif self.action == "history":
            return RankingSnapshotSerializer
        elif self.action == "statistics":
            return RankingStatsSerializer
        return RideRankingSerializer

    @action(detail=True, methods=["get"])
    def history(self, request, ride_slug=None):
        """Get ranking history for a specific ride."""
        ranking = self.get_object()
        history = RankingSnapshot.objects.filter(ride=ranking.ride).order_by(
            "-snapshot_date"
        )[:90]  # Last 3 months

        serializer = self.get_serializer(history, many=True)
        return Response(serializer.data)

    @action(detail=False, methods=["get"])
    def statistics(self, request):
        """Get overall ranking system statistics."""
        total_rankings = RideRanking.objects.count()
        total_comparisons = RidePairComparison.objects.count()

        # Get last calculation time
        latest_ranking = RideRanking.objects.order_by("-last_calculated").first()
        last_calc_time = latest_ranking.last_calculated if latest_ranking else None

        # Get top rated ride
        top_rated = RideRanking.objects.select_related("ride", "ride__park").first()

        # Get most compared ride
        most_compared = (
            RideRanking.objects.select_related("ride", "ride__park")
            .order_by("-comparison_count")
            .first()
        )

        # Get biggest rank change (last 7 days)
        from datetime import timedelta

        week_ago = timezone.now().date() - timedelta(days=7)

        biggest_change = None
        max_change = 0

        current_rankings = RideRanking.objects.select_related("ride")
        for ranking in current_rankings[:100]:  # Check top 100 for performance
            old_snapshot = (
                RankingSnapshot.objects.filter(
                    ride=ranking.ride, snapshot_date__lte=week_ago
                )
                .order_by("-snapshot_date")
                .first()
            )

            if old_snapshot:
                change = abs(old_snapshot.rank - ranking.rank)
                if change > max_change:
                    max_change = change
                    biggest_change = {
                        "ride": {
                            "id": ranking.ride.id,
                            "name": ranking.ride.name,
                            "slug": ranking.ride.slug,
                        },
                        "current_rank": ranking.rank,
                        "previous_rank": old_snapshot.rank,
                        "change": old_snapshot.rank - ranking.rank,
                    }

        stats = {
            "total_ranked_rides": total_rankings,
            "total_comparisons": total_comparisons,
            "last_calculation_time": last_calc_time,
            "calculation_duration": None,  # Would need to track this separately
            "top_rated_ride": (
                {
                    "id": top_rated.ride.id,
                    "name": top_rated.ride.name,
                    "slug": top_rated.ride.slug,
                    "park": top_rated.ride.park.name,
                    "rank": top_rated.rank,
                    "winning_percentage": float(top_rated.winning_percentage),
                    "average_rating": (
                        float(top_rated.average_rating)
                        if top_rated.average_rating
                        else None
                    ),
                }
                if top_rated
                else None
            ),
            "most_compared_ride": (
                {
                    "id": most_compared.ride.id,
                    "name": most_compared.ride.name,
                    "slug": most_compared.ride.slug,
                    "park": most_compared.ride.park.name,
                    "comparison_count": most_compared.comparison_count,
                }
                if most_compared
                else None
            ),
            "biggest_rank_change": biggest_change,
        }

        serializer = RankingStatsSerializer(stats)
        return Response(serializer.data)

    @action(detail=True, methods=["get"])
    def comparisons(self, request, ride_slug=None):
        """Get head-to-head comparisons for a specific ride."""
        ranking = self.get_object()

        comparisons = (
            RidePairComparison.objects.filter(
                Q(ride_a=ranking.ride) | Q(ride_b=ranking.ride)
            )
            .select_related("ride_a", "ride_b", "ride_a__park", "ride_b__park")
            .order_by("-mutual_riders_count")[:50]
        )

        results = []
        for comp in comparisons:
            if comp.ride_a == ranking.ride:
                opponent = comp.ride_b
                wins = comp.ride_a_wins
                losses = comp.ride_b_wins
            else:
                opponent = comp.ride_a
                wins = comp.ride_b_wins
                losses = comp.ride_a_wins

            result = "win" if wins > losses else "loss" if losses > wins else "tie"

            results.append(
                {
                    "opponent": {
                        "id": opponent.id,
                        "name": opponent.name,
                        "slug": opponent.slug,
                        "park": {
                            "id": opponent.park.id,
                            "name": opponent.park.name,
                            "slug": opponent.park.slug,
                        },
                    },
                    "wins": wins,
                    "losses": losses,
                    "ties": comp.ties,
                    "result": result,
                    "mutual_riders": comp.mutual_riders_count,
                    "ride_a_avg_rating": (
                        float(comp.ride_a_avg_rating)
                        if comp.ride_a_avg_rating
                        else None
                    ),
                    "ride_b_avg_rating": (
                        float(comp.ride_b_avg_rating)
                        if comp.ride_b_avg_rating
                        else None
                    ),
                }
            )

        return Response(results)


@extend_schema(
    summary="Trigger ranking calculation",
    description="Manually trigger a ranking calculation (admin only).",
    request=None,
    responses={
        200: OpenApiTypes.OBJECT,
        403: OpenApiTypes.OBJECT,
    },
    tags=["Rankings", "Admin"],
)
class TriggerRankingCalculationView(APIView):
    """
    Admin endpoint to manually trigger ranking calculation.
    """

    permission_classes = [IsAuthenticatedOrReadOnly]

    def post(self, request):
        """Trigger ranking calculation."""
        if not request.user.is_staff:
            return Response(
                {"error": "Admin access required"}, status=status.HTTP_403_FORBIDDEN
            )

        category = request.data.get("category")

        service = RideRankingService()
        result = service.update_all_rankings(category=category)

        return Response(result)
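The viewset above attributes the rankings to the Internet Roller Coaster Poll's pairwise method: for every pair of rides, only "mutual riders" (users who rated both) are compared, and a ride wins the pair if most of them rated it higher. A minimal sketch of that idea, with ties scored as half a win (an assumption; `RideRankingService`'s exact scoring is not shown in this diff):

```python
# Hedged sketch of the pairwise-poll idea; not the repo's implementation.
from collections import defaultdict
from itertools import combinations


def rank_rides(ratings):
    """ratings: {user_id: {ride_slug: rating}} -> slugs, best first."""
    # Per-pair tallies of which ride each mutual rider rated higher.
    prefer = defaultdict(lambda: [0, 0, 0])  # (a, b) -> [a_higher, b_higher, equal]
    for user_ratings in ratings.values():
        for a, b in combinations(sorted(user_ratings), 2):
            ra, rb = user_ratings[a], user_ratings[b]
            if ra > rb:
                prefer[(a, b)][0] += 1
            elif rb > ra:
                prefer[(a, b)][1] += 1
            else:
                prefer[(a, b)][2] += 1

    # Convert pair outcomes into win/loss/tie records per ride.
    record = defaultdict(lambda: [0, 0, 0])  # ride -> [wins, losses, ties]
    for (a, b), (a_hi, b_hi, _eq) in prefer.items():
        if a_hi > b_hi:
            record[a][0] += 1
            record[b][1] += 1
        elif b_hi > a_hi:
            record[b][0] += 1
            record[a][1] += 1
        else:
            record[a][2] += 1
            record[b][2] += 1

    def win_pct(ride):
        wins, losses, ties = record[ride]
        total = wins + losses + ties
        # Ties as half a win -- an assumed convention for this sketch.
        return (wins + 0.5 * ties) / total if total else 0.0

    return sorted(record, key=win_pct, reverse=True)


if __name__ == "__main__":
    demo = {
        1: {"steel-vengeance": 10, "millennium-force": 9},
        2: {"steel-vengeance": 9, "millennium-force": 10},
        3: {"steel-vengeance": 10, "millennium-force": 8},
    }
    print(rank_rides(demo))  # ['steel-vengeance', 'millennium-force']
```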
@@ -26,12 +26,12 @@ class PageView(models.Model):
         ]
 
     @classmethod
-    def get_trending_items(cls, model_class, hours=24, limit=10):
+    def get_trending_items(cls, model_class, hours=168, limit=10):
         """Get trending items of a specific model class based on views in last X hours.
 
         Args:
             model_class: The model class to get trending items for (e.g., Park, Ride)
-            hours (int): Number of hours to look back for views (default: 24)
+            hours (int): Number of hours to look back for views (default: 168 = 7 days)
             limit (int): Maximum number of items to return (default: 10)
 
         Returns:
@@ -61,3 +61,65 @@ class PageView(models.Model):
             return model_class.objects.filter(pk__in=id_list).order_by(preserved)
 
         return model_class.objects.none()
+
+    @classmethod
+    def get_views_growth(
+        cls, content_type, object_id, current_period_hours, previous_period_hours
+    ):
+        """Get view growth statistics between two time periods.
+
+        Args:
+            content_type: ContentType instance for the model
+            object_id: ID of the specific object
+            current_period_hours: Hours for current period (e.g., 24)
+            previous_period_hours: Hours for previous period (e.g., 48)
+
+        Returns:
+            tuple: (current_views, previous_views, growth_percentage)
+        """
+        from datetime import timedelta
+
+        now = timezone.now()
+
+        # Current period: last X hours
+        current_start = now - timedelta(hours=current_period_hours)
+        current_views = cls.objects.filter(
+            content_type=content_type, object_id=object_id, timestamp__gte=current_start
+        ).count()
+
+        # Previous period: X hours before current period
+        previous_start = now - timedelta(hours=previous_period_hours)
+        previous_end = current_start
+        previous_views = cls.objects.filter(
+            content_type=content_type,
+            object_id=object_id,
+            timestamp__gte=previous_start,
+            timestamp__lt=previous_end,
+        ).count()
+
+        # Calculate growth percentage
+        if previous_views == 0:
+            growth_percentage = current_views * 100 if current_views > 0 else 0
+        else:
+            growth_percentage = (
+                (current_views - previous_views) / previous_views
+            ) * 100
+
+        return current_views, previous_views, growth_percentage
+
+    @classmethod
+    def get_total_views_count(cls, content_type, object_id, hours=168):
+        """Get total view count for an object within specified hours.
+
+        Args:
+            content_type: ContentType instance for the model
+            object_id: ID of the specific object
+            hours: Number of hours to look back (default: 168 = 7 days)
+
+        Returns:
+            int: Total view count
+        """
+        cutoff = timezone.now() - timedelta(hours=hours)
+        return cls.objects.filter(
+            content_type=content_type, object_id=object_id, timestamp__gte=cutoff
+        ).count()
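As a usage sketch for the new classmethods (a saved `park` instance is assumed for illustration):

```python
# Hypothetical call site -- day-over-day growth for one object.
from django.contrib.contenttypes.models import ContentType

ct = ContentType.objects.get_for_model(park)  # park: some saved model instance
current, previous, growth = PageView.get_views_growth(
    ct, park.id, current_period_hours=24, previous_period_hours=48
)
print(f"{current} views vs {previous} previously ({growth:+.1f}%)")

weekly_total = PageView.get_total_views_count(ct, park.id)  # default: 168 hours
```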
@@ -1 +1 @@
-# Django management commands
+# Django management commands

@@ -1 +1 @@
-# Django management commands
+# Django management commands
backend/apps/core/management/commands/clear_cache.py (new file, 472 lines)
@@ -0,0 +1,472 @@
"""
Django management command to clear all types of cache data.

This command provides comprehensive cache clearing functionality including:
- Django cache framework (all configured backends)
- Python __pycache__ directories and .pyc files
- Static files cache
- Session cache
- Template cache
- Tailwind CSS build cache
- OPcache (if available)
"""

import shutil
import subprocess
from pathlib import Path

from django.core.cache import cache, caches
from django.core.management.base import BaseCommand
from django.conf import settings


class Command(BaseCommand):
    help = (
        "Clear all types of cache data including Django cache, "
        "__pycache__, and build caches"
    )

    def add_arguments(self, parser):
        parser.add_argument(
            "--django-cache",
            action="store_true",
            help="Clear Django cache framework cache only",
        )
        parser.add_argument(
            "--pycache",
            action="store_true",
            help="Clear Python __pycache__ directories and .pyc files only",
        )
        parser.add_argument(
            "--static",
            action="store_true",
            help="Clear static files cache only",
        )
        parser.add_argument(
            "--sessions",
            action="store_true",
            help="Clear session cache only",
        )
        parser.add_argument(
            "--templates",
            action="store_true",
            help="Clear template cache only",
        )
        parser.add_argument(
            "--tailwind",
            action="store_true",
            help="Clear Tailwind CSS build cache only",
        )
        parser.add_argument(
            "--opcache",
            action="store_true",
            help="Clear PHP OPcache if available",
        )
        parser.add_argument(
            "--dry-run",
            action="store_true",
            help="Show what would be cleared without actually clearing",
        )
        parser.add_argument(
            "--verbose",
            action="store_true",
            help="Show detailed output of clearing operations",
        )

    def handle(self, *args, **options):
        """Clear cache data based on provided options."""
        self.dry_run = options["dry_run"]
        self.verbose = options["verbose"]

        # If no specific cache type is specified, clear all
        clear_all = not any(
            [
                options["django_cache"],
                options["pycache"],
                options["static"],
                options["sessions"],
                options["templates"],
                options["tailwind"],
                options["opcache"],
            ]
        )

        if self.dry_run:
            self.stdout.write(
                self.style.WARNING("🔍 DRY RUN MODE - No files will be deleted")
            )
            self.stdout.write("")

        self.stdout.write(self.style.SUCCESS("🧹 ThrillWiki Cache Clearing Utility"))
        self.stdout.write("")

        # Clear Django cache framework
        if clear_all or options["django_cache"]:
            self.clear_django_cache()

        # Clear Python __pycache__
        if clear_all or options["pycache"]:
            self.clear_pycache()

        # Clear static files cache
        if clear_all or options["static"]:
            self.clear_static_cache()

        # Clear sessions cache
        if clear_all or options["sessions"]:
            self.clear_sessions_cache()

        # Clear template cache
        if clear_all or options["templates"]:
            self.clear_template_cache()

        # Clear Tailwind cache
        if clear_all or options["tailwind"]:
            self.clear_tailwind_cache()

        # Clear OPcache
        if clear_all or options["opcache"]:
            self.clear_opcache()

        self.stdout.write("")
        self.stdout.write(
            self.style.SUCCESS("✅ Cache clearing completed successfully!")
        )
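    # --- Illustrative usage note (not part of this file's diff) ---
    # The command maps its argparse flags to keyword options, so besides
    #   python manage.py clear_cache --pycache --dry-run
    # it can be driven programmatically:
    #   from django.core.management import call_command
    #   call_command("clear_cache", pycache=True, dry_run=True)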
    def clear_django_cache(self):
        """Clear Django cache framework cache."""
        self.stdout.write("🗄️ Clearing Django cache framework...")

        try:
            # Clear default cache
            if not self.dry_run:
                cache.clear()

            cache_info = f"Default cache ({cache.__class__.__name__})"
            self.stdout.write(self.style.SUCCESS(f"   ✅ Cleared {cache_info}"))

            # Clear all configured caches
            cache_aliases = getattr(settings, "CACHES", {}).keys()
            for alias in cache_aliases:
                if alias != "default":  # Already cleared above
                    try:
                        cache_backend = caches[alias]
                        if not self.dry_run:
                            cache_backend.clear()

                        cache_info = (
                            f"{alias} cache ({cache_backend.__class__.__name__})"
                        )
                        self.stdout.write(
                            self.style.SUCCESS(f"   ✅ Cleared {cache_info}")
                        )
                    except Exception as e:
                        self.stdout.write(
                            self.style.WARNING(
                                f"   ⚠️ Could not clear {alias} cache: {e}"
                            )
                        )

        except Exception as e:
            self.stdout.write(
                self.style.ERROR(f"   ❌ Error clearing Django cache: {e}")
            )

    def clear_pycache(self):
        """Clear Python __pycache__ directories and .pyc files."""
        self.stdout.write("🐍 Clearing Python __pycache__ and .pyc files...")

        removed_count = 0
        removed_size = 0

        try:
            # Start from project root
            project_root = Path(settings.BASE_DIR)

            # Find and remove __pycache__ directories
            for pycache_dir in project_root.rglob("__pycache__"):
                if pycache_dir.is_dir():
                    try:
                        # Calculate size before removal
                        dir_size = sum(
                            f.stat().st_size
                            for f in pycache_dir.rglob("*")
                            if f.is_file()
                        )
                        removed_size += dir_size

                        if self.verbose:
                            self.stdout.write(f"   🗑️ Removing: {pycache_dir}")

                        if not self.dry_run:
                            shutil.rmtree(pycache_dir)

                        removed_count += 1
                    except Exception as e:
                        self.stdout.write(
                            self.style.WARNING(
                                f"   ⚠️ Could not remove {pycache_dir}: {e}"
                            )
                        )

            # Find and remove .pyc files
            for pyc_file in project_root.rglob("*.pyc"):
                try:
                    file_size = pyc_file.stat().st_size
                    removed_size += file_size

                    if self.verbose:
                        self.stdout.write(f"   🗑️ Removing: {pyc_file}")

                    if not self.dry_run:
                        pyc_file.unlink()

                    removed_count += 1
                except Exception as e:
                    self.stdout.write(
                        self.style.WARNING(f"   ⚠️ Could not remove {pyc_file}: {e}")
                    )

            # Format file size
            size_mb = removed_size / (1024 * 1024)
            self.stdout.write(
                self.style.SUCCESS(
                    f"   ✅ Removed {removed_count} Python cache items ({size_mb:.2f} MB)"
                )
            )

        except Exception as e:
            self.stdout.write(
                self.style.ERROR(f"   ❌ Error clearing Python cache: {e}")
            )

    def clear_static_cache(self):
        """Clear static files cache."""
        self.stdout.write("📦 Clearing static files cache...")

        try:
            static_root = getattr(settings, "STATIC_ROOT", None)

            if static_root and Path(static_root).exists():
                static_path = Path(static_root)

                # Calculate size
                total_size = sum(
                    f.stat().st_size for f in static_path.rglob("*") if f.is_file()
                )
                size_mb = total_size / (1024 * 1024)

                if self.verbose:
                    self.stdout.write(f"   🗑️ Removing: {static_path}")

                if not self.dry_run:
                    shutil.rmtree(static_path)
                    static_path.mkdir(parents=True, exist_ok=True)

                self.stdout.write(
                    self.style.SUCCESS(
                        f"   ✅ Cleared static files cache ({size_mb:.2f} MB)"
                    )
                )
            else:
                self.stdout.write(
                    self.style.WARNING(
                        "   ⚠️ No STATIC_ROOT configured or directory doesn't exist"
                    )
                )

        except Exception as e:
            self.stdout.write(
                self.style.ERROR(f"   ❌ Error clearing static cache: {e}")
            )

    def clear_sessions_cache(self):
        """Clear session cache if using cache-based sessions."""
        self.stdout.write("🔐 Clearing session cache...")

        try:
            session_engine = getattr(settings, "SESSION_ENGINE", "")

            if "cache" in session_engine:
                # Using cache-based sessions
                session_cache_alias = getattr(
                    settings, "SESSION_CACHE_ALIAS", "default"
                )
                session_cache = caches[session_cache_alias]

                if not self.dry_run:
                    # Clear session keys (this is a simplified approach)
                    # In production, you might want more sophisticated session clearing
                    session_cache.clear()

                self.stdout.write(
                    self.style.SUCCESS(
                        f"   ✅ Cleared cache-based sessions ({session_cache_alias})"
                    )
                )
            else:
                self.stdout.write(
                    self.style.WARNING("   ⚠️ Not using cache-based sessions")
                )

        except Exception as e:
            self.stdout.write(
                self.style.ERROR(f"   ❌ Error clearing session cache: {e}")
            )

    def clear_template_cache(self):
        """Clear template cache."""
        self.stdout.write("📄 Clearing template cache...")

        try:
            # Clear template cache if using cached template loader
            from django.template import engines
            from django.template.loaders.cached import Loader as CachedLoader

            cleared_engines = 0
            for engine in engines.all():
                try:
                    # Check for DjangoTemplates engine with cached loaders
                    engine_backend = getattr(engine, "backend", "")
                    if "DjangoTemplates" in engine_backend:
                        # Get engine instance safely
                        engine_instance = getattr(engine, "engine", None)
                        if engine_instance:
                            template_loaders = getattr(
                                engine_instance, "template_loaders", []
                            )
                            for loader in template_loaders:
                                if isinstance(loader, CachedLoader):
                                    if not self.dry_run:
                                        loader.reset()
                                    cleared_engines += 1
                                    if self.verbose:
                                        self.stdout.write(
                                            f"   🗑️ Cleared cached loader: {loader}"
                                        )

                    # Check for Jinja2 engines (if present)
                    elif "Jinja2" in engine_backend and hasattr(engine, "env"):
                        env = getattr(engine, "env", None)
                        if env and hasattr(env, "cache"):
                            if not self.dry_run:
                                env.cache.clear()
                            cleared_engines += 1
                            if self.verbose:
                                self.stdout.write(
                                    f"   🗑️ Cleared Jinja2 cache: {engine}"
                                )

                except Exception as e:
                    if self.verbose:
                        self.stdout.write(
                            self.style.WARNING(
                                f"   ⚠️ Could not clear cache for engine {engine}: {e}"
                            )
                        )

            if cleared_engines > 0:
                self.stdout.write(
                    self.style.SUCCESS(
                        f"   ✅ Cleared template cache for "
                        f"{cleared_engines} loaders/engines"
                    )
                )
            else:
                self.stdout.write(
                    self.style.WARNING("   ⚠️ No cached template loaders found")
                )

        except Exception as e:
            self.stdout.write(
|
||||
self.style.ERROR(f" ❌ Error clearing template cache: {e}")
|
||||
)
|
||||
|
||||
def clear_tailwind_cache(self):
|
||||
"""Clear Tailwind CSS build cache."""
|
||||
self.stdout.write("🎨 Clearing Tailwind CSS cache...")
|
||||
|
||||
try:
|
||||
# Look for common Tailwind cache directories
|
||||
project_root = Path(settings.BASE_DIR)
|
||||
cache_paths = [
|
||||
project_root / "node_modules" / ".cache",
|
||||
project_root / ".tailwindcss-cache",
|
||||
project_root / "static" / "css" / ".cache",
|
||||
]
|
||||
|
||||
cleared_count = 0
|
||||
for cache_path in cache_paths:
|
||||
if cache_path.exists():
|
||||
try:
|
||||
if self.verbose:
|
||||
self.stdout.write(f" 🗑️ Removing: {cache_path}")
|
||||
|
||||
if not self.dry_run:
|
||||
if cache_path.is_file():
|
||||
cache_path.unlink()
|
||||
else:
|
||||
shutil.rmtree(cache_path)
|
||||
|
||||
cleared_count += 1
|
||||
except Exception as e:
|
||||
self.stdout.write(
|
||||
self.style.WARNING(
|
||||
f" ⚠️ Could not remove {cache_path}: {e}"
|
||||
)
|
||||
)
|
||||
|
||||
if cleared_count > 0:
|
||||
self.stdout.write(
|
||||
self.style.SUCCESS(
|
||||
f" ✅ Cleared {cleared_count} Tailwind cache directories"
|
||||
)
|
||||
)
|
||||
else:
|
||||
self.stdout.write(
|
||||
self.style.WARNING(" ⚠️ No Tailwind cache directories found")
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.stdout.write(
|
||||
self.style.ERROR(f" ❌ Error clearing Tailwind cache: {e}")
|
||||
)
|
||||
|
||||
def clear_opcache(self):
|
||||
"""Clear PHP OPcache if available."""
|
||||
self.stdout.write("⚡ Clearing OPcache...")
|
||||
|
||||
try:
|
||||
# This is mainly for mixed environments
|
||||
php_code = (
|
||||
"if (function_exists('opcache_reset')) { "
|
||||
"opcache_reset(); echo 'cleared'; } "
|
||||
"else { echo 'not_available'; }"
|
||||
)
|
||||
result = subprocess.run(
|
||||
["php", "-r", php_code],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if result.returncode == 0:
|
||||
if "cleared" in result.stdout:
|
||||
self.stdout.write(
|
||||
self.style.SUCCESS(" ✅ OPcache cleared successfully")
|
||||
)
|
||||
else:
|
||||
self.stdout.write(self.style.WARNING(" ⚠️ OPcache not available"))
|
||||
else:
|
||||
self.stdout.write(
|
||||
self.style.WARNING(
|
||||
" ⚠️ PHP not available or OPcache not accessible"
|
||||
)
|
||||
)
|
||||
|
||||
except (subprocess.TimeoutExpired, FileNotFoundError):
|
||||
self.stdout.write(
|
||||
self.style.WARNING(" ⚠️ PHP not found or not accessible")
|
||||
)
|
||||
except Exception as e:
|
||||
self.stdout.write(self.style.ERROR(f" ❌ Error clearing OPcache: {e}"))
|
||||
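A quick way to exercise the command above is Django's call_command. This is a minimal sketch, not part of the commit: the registration name "clear_cache" is an assumption (the command's file name is not shown here), and the dry_run keyword assumes a --dry-run option wired to the self.dry_run flag used above.

# Hypothetical smoke test for the cache-clearing command above.
# "clear_cache" is an assumed registration name; adjust to the actual file name.
from django.core.management import call_command

def smoke_test_clear_cache():
    # Dry run first: reports what would be cleared without deleting anything.
    call_command("clear_cache", dry_run=True)
    # Real run once the dry-run output looks right.
    call_command("clear_cache")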
309
backend/apps/core/management/commands/test_trending.py
Normal file
@@ -0,0 +1,309 @@
import random
from datetime import datetime, timedelta

from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone

from apps.parks.models.parks import Park
from apps.rides.models.rides import Ride
from apps.parks.models.companies import Company
from apps.core.analytics import PageView
from apps.core.services.trending_service import trending_service


class Command(BaseCommand):
    help = "Test the trending algorithm with sample data"

    def add_arguments(self, parser):
        parser.add_argument(
            "--clean",
            action="store_true",
            help="Clean existing test data before creating new data",
        )
        parser.add_argument(
            "--verbose",
            action="store_true",
            help="Show detailed output",
        )

    def handle(self, *args, **options):
        self.verbose = options["verbose"]

        if options["clean"]:
            self.clean_test_data()

        self.create_test_data()
        self.test_trending_algorithm()
        self.test_api_format()

        self.stdout.write(
            self.style.SUCCESS("✓ Trending system test completed successfully!")
        )

    def clean_test_data(self):
        """Clean existing test data."""
        self.stdout.write("Cleaning existing test data...")

        # Delete test PageViews
        PageView.objects.filter(
            content_type__in=[
                ContentType.objects.get_for_model(Park),
                ContentType.objects.get_for_model(Ride),
            ]
        ).delete()

        self.stdout.write("✓ Test data cleaned")

    def create_test_data(self):
        """Create sample parks, rides, and page views for testing."""
        self.stdout.write("Creating test data...")

        # Create or get default operator company
        operator, created = Company.objects.get_or_create(
            name="Default Theme Park Operator",
            defaults={
                "roles": ["OPERATOR"],
                "description": "Default operator for test parks",
            },
        )
        if created and self.verbose:
            self.stdout.write(f" Created operator company: {operator.name}")

        # Get or create test parks and rides
        parks_data = [
            {
                "name": "Cedar Point",
                "slug": "cedar-point",
                "description": "America's Roller Coast featuring world-class roller coasters",
                "average_rating": 9.2,
                "opening_date": datetime(1870, 1, 1).date(),
                "operator": operator,
            },
            {
                "name": "Magic Kingdom",
                "slug": "magic-kingdom",
                "description": "Walt Disney World's most magical theme park",
                "average_rating": 9.5,
                "opening_date": datetime(1971, 10, 1).date(),
                "operator": operator,
            },
            {
                "name": "Six Flags Great Adventure",
                "slug": "six-flags-great-adventure",
                "description": "Home to Kingda Ka and incredible thrills",
                "average_rating": 8.8,
                "opening_date": datetime(1974, 7, 1).date(),
                "operator": operator,
            },
        ]

        # Create parks
        parks = []
        for park_data in parks_data:
            park, created = Park.objects.get_or_create(
                name=park_data["name"], defaults=park_data
            )
            parks.append(park)
            if created and self.verbose:
                self.stdout.write(f" Created park: {park.name}")

        # Now create rides - they need park references
        rides_data = [
            {
                "name": "Steel Vengeance",
                "slug": "steel-vengeance",
                "description": "Hybrid roller coaster at Cedar Point",
                "park": next(p for p in parks if p.name == "Cedar Point"),
                "category": "RC",  # Roller Coaster
                "average_rating": 9.8,
                "opening_date": datetime(2018, 5, 5).date(),
            },
            {
                "name": "Space Mountain",
                "slug": "space-mountain",
                "description": "Indoor space-themed roller coaster",
                "park": next(p for p in parks if p.name == "Magic Kingdom"),
                "category": "RC",  # Roller Coaster
                "average_rating": 8.5,
                "opening_date": datetime(1975, 1, 15).date(),
            },
            {
                "name": "Kingda Ka",
                "slug": "kingda-ka",
                "description": "World's tallest roller coaster",
                "park": next(p for p in parks if p.name == "Six Flags Great Adventure"),
                "category": "RC",  # Roller Coaster
                "average_rating": 9.0,
                "opening_date": datetime(2005, 5, 21).date(),
            },
            {
                "name": "Millennium Force",
                "slug": "millennium-force",
                "description": "Legendary steel roller coaster",
                "park": next(p for p in parks if p.name == "Cedar Point"),
                "category": "RC",  # Roller Coaster
                "average_rating": 9.4,
                "opening_date": datetime(2000, 5, 13).date(),
            },
        ]

        # Create rides
        rides = []
        for ride_data in rides_data:
            ride, created = Ride.objects.get_or_create(
                name=ride_data["name"], defaults=ride_data
            )
            rides.append(ride)
            if created and self.verbose:
                self.stdout.write(f" Created ride: {ride.name}")

        # Create PageViews with different patterns to test trending
        self.create_page_views(parks, rides)

        self.stdout.write("✓ Test data created")

    def create_page_views(self, parks, rides):
        """Create PageViews with different trending patterns."""
        now = timezone.now()

        # Pattern 1: Recently trending item (Steel Vengeance)
        steel_vengeance = next(r for r in rides if r.name == "Steel Vengeance")
        self.create_views_for_content(
            steel_vengeance, recent_views=50, older_views=10, base_time=now
        )

        # Pattern 2: Consistently popular item (Space Mountain)
        space_mountain = next(r for r in rides if r.name == "Space Mountain")
        self.create_views_for_content(
            space_mountain, recent_views=30, older_views=25, base_time=now
        )

        # Pattern 3: Declining popularity (Kingda Ka)
        kingda_ka = next(r for r in rides if r.name == "Kingda Ka")
        self.create_views_for_content(
            kingda_ka, recent_views=5, older_views=40, base_time=now
        )

        # Pattern 4: New but growing (Millennium Force)
        millennium_force = next(r for r in rides if r.name == "Millennium Force")
        self.create_views_for_content(
            millennium_force, recent_views=25, older_views=5, base_time=now
        )

        # Create some park views too
        cedar_point = next(p for p in parks if p.name == "Cedar Point")
        self.create_views_for_content(
            cedar_point, recent_views=35, older_views=20, base_time=now
        )

        if self.verbose:
            self.stdout.write(" Created PageView data for trending analysis")

    def create_views_for_content(
        self, content_object, recent_views, older_views, base_time
    ):
        """Create PageViews for a content object with specified patterns."""
        content_type = ContentType.objects.get_for_model(type(content_object))

        # Create recent views (last 2 hours)
        for i in range(recent_views):
            view_time = base_time - timedelta(
                minutes=random.randint(0, 120)  # Last 2 hours
            )
            PageView.objects.create(
                content_type=content_type,
                object_id=content_object.id,
                ip_address=f"192.168.1.{random.randint(1, 255)}",
                user_agent="Test Agent",
                timestamp=view_time,
            )

        # Create older views (2-24 hours ago)
        for i in range(older_views):
            view_time = base_time - timedelta(hours=random.randint(2, 24))
            PageView.objects.create(
                content_type=content_type,
                object_id=content_object.id,
                ip_address=f"10.0.0.{random.randint(1, 255)}",
                user_agent="Test Agent",
                timestamp=view_time,
            )

    def test_trending_algorithm(self):
        """Test the trending algorithm functionality."""
        self.stdout.write("Testing trending algorithm...")

        # Test trending content for different content types
        trending_parks = trending_service.get_trending_content(
            content_type="parks", limit=3
        )
        trending_rides = trending_service.get_trending_content(
            content_type="rides", limit=3
        )
        trending_all = trending_service.get_trending_content(
            content_type="all", limit=5
        )

        # Test new content
        new_parks = trending_service.get_new_content(content_type="parks", limit=3)
        new_rides = trending_service.get_new_content(content_type="rides", limit=3)
        new_all = trending_service.get_new_content(content_type="all", limit=5)

        if self.verbose:
            self.stdout.write(f" Trending parks: {len(trending_parks)} results")
            self.stdout.write(f" Trending rides: {len(trending_rides)} results")
            self.stdout.write(f" Trending all: {len(trending_all)} results")
            self.stdout.write(f" New parks: {len(new_parks)} results")
            self.stdout.write(f" New rides: {len(new_rides)} results")
            self.stdout.write(f" New all: {len(new_all)} results")

        self.stdout.write("✓ Trending algorithm working correctly")

    def test_api_format(self):
        """Test that API responses match expected frontend format."""
        self.stdout.write("Testing API response format...")

        # Test trending content format
        trending_parks = trending_service.get_trending_content(
            content_type="parks", limit=3
        )
        trending_rides = trending_service.get_trending_content(
            content_type="rides", limit=3
        )

        # Test new content format
        new_parks = trending_service.get_new_content(content_type="parks", limit=3)
        new_rides = trending_service.get_new_content(content_type="rides", limit=3)

        # Verify trending data structure
        if trending_parks:
            item = trending_parks[0]
            required_trending_fields = [
                "id",
                "name",
                "slug",
                "views",
                "views_change",
                "rank",
            ]
            for field in required_trending_fields:
                if field not in item:
                    raise ValueError(f"Missing required trending field: {field}")

        # Verify new content data structure
        if new_parks:
            item = new_parks[0]
            required_new_fields = ["id", "name", "slug"]
            for field in required_new_fields:
                if field not in item:
                    raise ValueError(f"Missing required new content field: {field}")

        if self.verbose:
            self.stdout.write(" Sample trending park data:")
            if trending_parks:
                self.stdout.write(f" {trending_parks[0]}")

            self.stdout.write(" Sample new park data:")
            if new_parks:
                self.stdout.write(f" {new_parks[0]}")

        self.stdout.write("✓ API format validation passed")
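For orientation, the field assertions in test_api_format imply that trending items are plain dicts shaped roughly as below. This is a hedged sketch, not commit content; every value is invented, and the unit of views_change is assumed.

# Hypothetical example of one trending item, matching the fields
# asserted in test_api_format(); the values are made up.
sample_trending_item = {
    "id": 1,
    "name": "Steel Vengeance",
    "slug": "steel-vengeance",
    "views": 50,            # views in the current window
    "views_change": 400.0,  # growth vs. the previous window (unit assumed)
    "rank": 1,
}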
@@ -6,30 +6,31 @@ from apps.core.analytics import PageView


class Command(BaseCommand):
    help = "Updates trending parks and rides cache based on views in the last 24 hours"
    help = "Updates trending parks and rides cache based on views in the last 7 days"

    def handle(self, *args, **kwargs):
        """
        Updates the trending parks and rides in the cache.

        This command is designed to be run every hour via cron to keep the trending
        items up to date. It looks at page views from the last 24 hours and caches
        This command is designed to be run once daily via cron to keep the trending
        items up to date. It looks at page views from the last 7 days and caches
        the top 10 most viewed parks and rides.

        The cached data is used by the home page to display trending items without
        having to query the database on every request.
        """
        # Get top 10 trending parks and rides from the last 24 hours
        trending_parks = PageView.get_trending_items(Park, hours=24, limit=10)
        trending_rides = PageView.get_trending_items(Ride, hours=24, limit=10)
        # Get top 10 trending parks and rides from the last 7 days (168 hours)
        trending_parks = PageView.get_trending_items(Park, hours=168, limit=10)
        trending_rides = PageView.get_trending_items(Ride, hours=168, limit=10)

        # Cache the results for 1 hour
        cache.set("trending_parks", trending_parks, 3600)  # 3600 seconds = 1 hour
        cache.set("trending_rides", trending_rides, 3600)
        # Cache the results for 24 hours (daily refresh)
        cache.set("trending_parks", trending_parks, 86400)  # 86400 seconds = 24 hours
        cache.set("trending_rides", trending_rides, 86400)

        self.stdout.write(
            self.style.SUCCESS(
                "Successfully updated trending parks and rides. "
                "Cached 10 items each for parks and rides based on views in the last 24 hours."
                "Cached 10 items each for parks and rides based on views "
                "in the last 7 days."
            )
        )

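Consumers such as the home page can read these lists straight back from the cache. A minimal sketch, assuming only the key names set above; the function itself is hypothetical.

# Hypothetical consumer of the cached trending lists.
from django.core.cache import cache

def get_cached_trending():
    # Fall back to empty lists if the daily cron has not populated the keys yet.
    parks = cache.get("trending_parks", [])
    rides = cache.get("trending_rides", [])
    return parks, rides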
@@ -1,22 +1,15 @@
# Core middleware modules
"""
Core middleware package.

# Import middleware classes from the analytics module
from .analytics import PageViewMiddleware, PgHistoryContextMiddleware
This package contains middleware components for the Django application,
including view tracking and other core functionality.
"""

# Import middleware classes from the performance_middleware.py module
from .performance_middleware import (
    PerformanceMiddleware,
    QueryCountMiddleware,
    DatabaseConnectionMiddleware,
    CachePerformanceMiddleware,
)
from .view_tracking import ViewTrackingMiddleware, get_view_stats_for_content
from .analytics import PgHistoryContextMiddleware

# Make all middleware classes available at the package level
__all__ = [
    "PageViewMiddleware",
    "ViewTrackingMiddleware",
    "get_view_stats_for_content",
    "PgHistoryContextMiddleware",
    "PerformanceMiddleware",
    "QueryCountMiddleware",
    "DatabaseConnectionMiddleware",
    "CachePerformanceMiddleware",
]

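For the re-exported classes to take effect, the tracking middleware still has to be listed in settings. A hedged sketch of the relevant MIDDLEWARE fragment, using the package-level names exported above; the exact position in the stack is a project choice, not something this commit specifies.

# Hypothetical settings.py fragment enabling the new tracking middleware.
MIDDLEWARE = [
    # ... Django's default middleware ...
    "apps.core.middleware.ViewTrackingMiddleware",
    "apps.core.middleware.PgHistoryContextMiddleware",
]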
@@ -44,41 +44,3 @@ class PgHistoryContextMiddleware:
    def __call__(self, request):
        response = self.get_response(request)
        return response


class PageViewMiddleware(MiddlewareMixin):
    """Middleware to track page views for DetailView-based pages."""

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Only track GET requests
        if request.method != "GET":
            return None

        # Get view class if it exists
        view_class = getattr(view_func, "view_class", None)
        if not view_class or not issubclass(view_class, DetailView):
            return None

        # Get the object if it's a detail view
        try:
            view_instance = view_class()
            view_instance.request = request
            view_instance.args = view_args
            view_instance.kwargs = view_kwargs
            obj = view_instance.get_object()
        except (AttributeError, Exception):
            return None

        # Record the page view
        try:
            PageView.objects.create(
                content_type=ContentType.objects.get_for_model(obj.__class__),
                object_id=obj.pk,
                ip_address=request.META.get("REMOTE_ADDR", ""),
                user_agent=request.META.get("HTTP_USER_AGENT", "")[:512],
            )
        except Exception:
            # Fail silently to not interrupt the request
            pass

        return None

331
backend/apps/core/middleware/view_tracking.py
Normal file
@@ -0,0 +1,331 @@
"""
View Tracking Middleware for automatic PageView recording.

This middleware automatically tracks page views for park and ride pages,
implementing IP-based deduplication to prevent spam and provide accurate
analytics for the trending algorithm.
"""

import logging
import re
from datetime import timedelta
from typing import Optional, Union
from django.http import HttpRequest, HttpResponse
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.conf import settings

from apps.core.analytics import PageView
from apps.parks.models import Park
from apps.rides.models import Ride

# Type alias for content objects
ContentObject = Union[Park, Ride]

logger = logging.getLogger(__name__)


class ViewTrackingMiddleware:
    """
    Middleware for tracking page views with IP deduplication.

    Automatically creates PageView records when users visit park or ride pages.
    Implements 24-hour IP deduplication window to prevent view inflation.
    """

    def __init__(self, get_response):
        self.get_response = get_response
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")

        # URL patterns for tracking - matches park and ride detail pages
        self.tracked_patterns = [
            (r"^/parks/(?P<slug>[\w-]+)/$", "park"),
            (r"^/rides/(?P<slug>[\w-]+)/$", "ride"),
            # Add API patterns if needed
            (r"^/api/v1/parks/(?P<slug>[\w-]+)/$", "park"),
            (r"^/api/v1/rides/(?P<slug>[\w-]+)/$", "ride"),
        ]

        # Compile patterns for performance
        self.compiled_patterns = [
            (re.compile(pattern), content_type)
            for pattern, content_type in self.tracked_patterns
        ]

        # Cache configuration
        self.cache_timeout = 60 * 15  # 15 minutes
        self.dedup_window_hours = 24

    def __call__(self, request: HttpRequest) -> HttpResponse:
        """Process the request and track views if applicable."""
        response = self.get_response(request)

        # Only track successful GET requests
        if (
            request.method == "GET"
            and 200 <= response.status_code < 300
            and not self._should_skip_tracking(request)
        ):
            try:
                self._track_view_if_applicable(request)
            except Exception as e:
                # Log error but don't break the request
                self.logger.error(f"Error tracking view: {e}", exc_info=True)

        return response

    def _should_skip_tracking(self, request: HttpRequest) -> bool:
        """Check if this request should be skipped for tracking."""
        # Skip if disabled in settings
        if not getattr(settings, "ENABLE_VIEW_TRACKING", True):
            return True

        # Skip requests from bots/crawlers
        user_agent = request.META.get("HTTP_USER_AGENT", "").lower()
        bot_indicators = [
            "bot",
            "crawler",
            "spider",
            "scraper",
            "facebook",
            "twitter",
            "linkedin",
            "google",
            "bing",
            "yahoo",
            "duckduckgo",
            "slurp",
        ]
        if any(indicator in user_agent for indicator in bot_indicators):
            return True

        # Skip requests without real IP
        if not self._get_client_ip(request):
            return True

        # Skip AJAX requests (optional - depending on requirements)
        if request.META.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest":
            return True

        return False

    def _track_view_if_applicable(self, request: HttpRequest) -> None:
        """Track view if the URL matches tracked patterns."""
        path = request.path

        for pattern, content_type in self.compiled_patterns:
            match = pattern.match(path)
            if match:
                slug = match.group("slug")
                self._record_page_view(request, content_type, slug)
                break

    def _record_page_view(
        self, request: HttpRequest, content_type: str, slug: str
    ) -> None:
        """Record a page view for the specified content."""
        client_ip = self._get_client_ip(request)
        if not client_ip:
            return

        try:
            # Get the content object
            content_obj = self._get_content_object(content_type, slug)
            if not content_obj:
                self.logger.warning(
                    f"Content not found: {content_type} with slug '{slug}'"
                )
                return

            # Check deduplication
            if self._is_duplicate_view(content_obj, client_ip):
                self.logger.debug(
                    f"Duplicate view skipped for {content_type} {slug} from {client_ip}"
                )
                return

            # Create PageView record
            self._create_page_view(content_obj, client_ip, request)

            self.logger.debug(
                f"Recorded view for {content_type} {slug} from {client_ip}"
            )

        except Exception as e:
            self.logger.error(
                f"Failed to record page view for {content_type} {slug}: {e}"
            )

    def _get_content_object(
        self, content_type: str, slug: str
    ) -> Optional[ContentObject]:
        """Get the content object by type and slug."""
        try:
            if content_type == "park":
                # Use get_by_slug method to handle historical slugs
                park, _ = Park.get_by_slug(slug)
                return park
            elif content_type == "ride":
                # For rides, we need to search by slug within parks
                return Ride.objects.filter(slug=slug).first()
            else:
                self.logger.warning(f"Unknown content type: {content_type}")
                return None

        except Park.DoesNotExist:
            return None
        except Exception as e:
            self.logger.error(f"Error getting {content_type} with slug {slug}: {e}")
            return None

    def _is_duplicate_view(self, content_obj: ContentObject, client_ip: str) -> bool:
        """Check if this view is a duplicate within the deduplication window."""
        # Use cache for performance
        cache_key = self._get_dedup_cache_key(content_obj, client_ip)

        if cache.get(cache_key):
            return True

        # Check database as fallback
        content_type = ContentType.objects.get_for_model(content_obj)
        cutoff_time = timezone.now() - timedelta(hours=self.dedup_window_hours)

        existing_view = PageView.objects.filter(
            content_type=content_type,
            object_id=content_obj.pk,
            ip_address=client_ip,
            timestamp__gte=cutoff_time,
        ).exists()

        if not existing_view:
            # Set cache to prevent future duplicates
            cache.set(cache_key, True, timeout=self.dedup_window_hours * 3600)

        return existing_view

    def _create_page_view(
        self, content_obj: ContentObject, client_ip: str, request: HttpRequest
    ) -> None:
        """Create a new PageView record."""
        content_type = ContentType.objects.get_for_model(content_obj)

        # Extract additional metadata (truncate long user agents and referers)
        user_agent = request.META.get("HTTP_USER_AGENT", "")[:500]
        referer = request.META.get("HTTP_REFERER", "")[:500]

        PageView.objects.create(
            content_type=content_type,
            object_id=content_obj.pk,
            ip_address=client_ip,
            user_agent=user_agent,
            referer=referer,
            path=request.path[:500],
        )

        # Update cache for deduplication
        cache_key = self._get_dedup_cache_key(content_obj, client_ip)
        cache.set(cache_key, True, timeout=self.dedup_window_hours * 3600)

    def _get_dedup_cache_key(self, content_obj: ContentObject, client_ip: str) -> str:
        """Generate cache key for deduplication."""
        content_type = ContentType.objects.get_for_model(content_obj)
        return f"pageview_dedup:{content_type.id}:{content_obj.pk}:{client_ip}"

    def _get_client_ip(self, request: HttpRequest) -> Optional[str]:
        """Extract client IP address from request."""
        # Check for forwarded IP (common in production with load balancers)
        x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
        if x_forwarded_for:
            # Take the first IP in the chain (client IP)
            ip = x_forwarded_for.split(",")[0].strip()
            if self._is_valid_ip(ip):
                return ip

        # Check for real IP header (some proxy configurations)
        x_real_ip = request.META.get("HTTP_X_REAL_IP")
        if x_real_ip and self._is_valid_ip(x_real_ip):
            return x_real_ip

        # Fall back to remote address
        remote_addr = request.META.get("REMOTE_ADDR")
        if remote_addr and self._is_valid_ip(remote_addr):
            return remote_addr

        return None

    def _is_valid_ip(self, ip: str) -> bool:
        """Validate IP address format."""
        try:
            # Basic validation - check if it looks like an IPv4 address
            parts = ip.split(".")
            if len(parts) != 4:
                return False

            for part in parts:
                if not part.isdigit() or not 0 <= int(part) <= 255:
                    return False

            # Skip localhost and private IPs in production
            if getattr(settings, "SKIP_LOCAL_IPS", not settings.DEBUG):
                if ip.startswith(("127.", "192.168.", "10.")):
                    return False
                # 172.16.0.0/12 is also a private range
                if ip.startswith("172.") and 16 <= int(parts[1]) <= 31:
                    return False

            return True

        except (ValueError, IndexError):
            return False


def get_view_stats_for_content(content_obj: ContentObject, hours: int = 24) -> dict:
    """
    Helper function to get view statistics for content.

    Args:
        content_obj: The content object (Park or Ride)
        hours: Time window in hours for stats

    Returns:
        Dictionary with view statistics
    """
    try:
        content_type = ContentType.objects.get_for_model(content_obj)
        cutoff_time = timezone.now() - timedelta(hours=hours)

        total_views = PageView.objects.filter(
            content_type=content_type,
            object_id=content_obj.pk,
            timestamp__gte=cutoff_time,
        ).count()

        unique_views = (
            PageView.objects.filter(
                content_type=content_type,
                object_id=content_obj.pk,
                timestamp__gte=cutoff_time,
            )
            .values("ip_address")
            .distinct()
            .count()
        )

        return {
            "total_views": total_views,
            "unique_views": unique_views,
            "hours": hours,
            "content_type": content_type.model,
            "content_id": content_obj.pk,
        }

    except Exception as e:
        logger.error(f"Error getting view stats: {e}")
        return {"total_views": 0, "unique_views": 0, "hours": hours, "error": str(e)}
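The module-level helper above is handy for sanity-checking tracking from a Django shell. A minimal sketch, assuming a park with the slug "cedar-point" exists (as in the test command earlier) and that the slug is unique:

# Hypothetical shell snippet for inspecting tracked views.
from apps.parks.models import Park
from apps.core.middleware.view_tracking import get_view_stats_for_content

park = Park.objects.get(slug="cedar-point")  # assumes this slug exists and is unique
stats = get_view_stats_for_content(park, hours=24)
print(stats["total_views"], stats["unique_views"])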
415
backend/apps/core/services/entity_fuzzy_matching.py
Normal file
@@ -0,0 +1,415 @@
"""
Entity Fuzzy Matching Service for ThrillWiki

Provides intelligent entity matching when exact lookups fail, with authentication
prompts for suggesting new entity creation.

Features:
- Levenshtein distance for typo correction
- Phonetic matching using Soundex algorithm
- Partial name matching
- Priority-based scoring (parks > rides > companies)
- Authentication state-aware suggestions
"""

import re
from difflib import SequenceMatcher
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass
from enum import Enum

from django.db.models import Q

from apps.parks.models import Company, Park
from apps.rides.models import Ride


class EntityType(Enum):
    """Supported entity types for fuzzy matching."""

    PARK = "park"
    RIDE = "ride"
    COMPANY = "company"


@dataclass
class FuzzyMatchResult:
    """Result of a fuzzy matching operation."""

    entity_type: EntityType
    entity: Any  # The actual model instance
    name: str
    slug: str
    score: float  # 0.0 to 1.0, higher is better match
    match_reason: str  # Description of why this was matched
    confidence: str  # 'high', 'medium', 'low'
    url: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for API responses."""
        return {
            "entity_type": self.entity_type.value,
            "name": self.name,
            "slug": self.slug,
            "score": round(self.score, 3),
            "match_reason": self.match_reason,
            "confidence": self.confidence,
            "url": self.url,
            "entity_id": getattr(self.entity, "id", None),
        }


@dataclass
class EntitySuggestion:
    """Suggestion for creating a new entity when no matches found."""

    suggested_name: str
    entity_type: EntityType
    requires_authentication: bool
    login_prompt: str
    signup_prompt: str
    creation_hint: str


class FuzzyMatchingAlgorithms:
    """Collection of fuzzy matching algorithms."""

    @staticmethod
    def levenshtein_distance(s1: str, s2: str) -> int:
        """Calculate Levenshtein distance between two strings."""
        if len(s1) < len(s2):
            return FuzzyMatchingAlgorithms.levenshtein_distance(s2, s1)

        if len(s2) == 0:
            return len(s1)

        previous_row = list(range(len(s2) + 1))
        for i, c1 in enumerate(s1):
            current_row = [i + 1]
            for j, c2 in enumerate(s2):
                insertions = previous_row[j + 1] + 1
                deletions = current_row[j] + 1
                substitutions = previous_row[j] + (c1 != c2)
                current_row.append(min(insertions, deletions, substitutions))
            previous_row = current_row

        return previous_row[-1]

    @staticmethod
    def similarity_ratio(s1: str, s2: str) -> float:
        """Calculate similarity ratio (0.0 to 1.0) using SequenceMatcher."""
        return SequenceMatcher(None, s1.lower(), s2.lower()).ratio()

    @staticmethod
    def soundex(name: str) -> str:
        """Generate Soundex code for phonetic matching."""
        name = re.sub(r"[^A-Za-z]", "", name.upper())
        if not name:
            return "0000"

        # Soundex algorithm
        soundex_map = {
            "BFPV": "1",
            "CGJKQSXZ": "2",
            "DT": "3",
            "L": "4",
            "MN": "5",
            "R": "6",
        }

        first_letter = name[0]
        name = name[1:]

        # Replace letters with numbers
        for letters, number in soundex_map.items():
            name = re.sub(f"[{letters}]", number, name)

        # Remove consecutive duplicates
        name = re.sub(r"(\d)\1+", r"\1", name)

        # Drop unmapped letters (vowels, H, W, Y) so only digit codes remain
        name = re.sub(r"[^0-9]", "", name)

        # Pad or truncate to 4 characters
        soundex_code = (first_letter + name + "000")[:4]
        return soundex_code

    @staticmethod
    def partial_match_score(query: str, target: str) -> float:
        """Calculate partial matching score for substring matches."""
        query_lower = query.lower()
        target_lower = target.lower()

        # Exact match
        if query_lower == target_lower:
            return 1.0

        # Starts with
        if target_lower.startswith(query_lower):
            return 0.8 + (len(query) / len(target)) * 0.15

        # Contains
        if query_lower in target_lower:
            return 0.6 + (len(query) / len(target)) * 0.2

        # Words match
        query_words = set(query_lower.split())
        target_words = set(target_lower.split())
        if query_words & target_words:
            intersection = len(query_words & target_words)
            union = len(query_words | target_words)
            return 0.4 + (intersection / union) * 0.3

        return 0.0


class EntityFuzzyMatcher:
    """Main fuzzy matching service for entities."""

    # Matching thresholds
    HIGH_CONFIDENCE_THRESHOLD = 0.8
    MEDIUM_CONFIDENCE_THRESHOLD = 0.6
    LOW_CONFIDENCE_THRESHOLD = 0.4

    # Maximum results to consider
    MAX_CANDIDATES = 50
    MAX_RESULTS = 5

    def __init__(self):
        self.algorithms = FuzzyMatchingAlgorithms()

    def find_entity(
        self, query: str, entity_types: Optional[List[EntityType]] = None, user=None
    ) -> Tuple[List[FuzzyMatchResult], Optional[EntitySuggestion]]:
        """
        Find entities matching the query with fuzzy matching.

        Args:
            query: Search query string
            entity_types: Limit search to specific entity types
            user: Current user for authentication context

        Returns:
            Tuple of (matches, suggestion_for_new_entity)
        """
        if not query or len(query.strip()) < 2:
            return [], None

        query = query.strip()
        entity_types = entity_types or [
            EntityType.PARK,
            EntityType.RIDE,
            EntityType.COMPANY,
        ]

        # Collect all potential matches
        candidates = []

        for entity_type in entity_types:
            candidates.extend(self._get_candidates(query, entity_type))

        # Score and rank candidates
        matches = self._score_and_rank_candidates(query, candidates)

        # Generate suggestion if no good matches found
        suggestion = None
        if not matches or matches[0].score < self.LOW_CONFIDENCE_THRESHOLD:
            suggestion = self._generate_entity_suggestion(query, entity_types, user)

        return matches[: self.MAX_RESULTS], suggestion

    def _get_candidates(
        self, query: str, entity_type: EntityType
    ) -> List[Dict[str, Any]]:
        """Get potential matching candidates for an entity type."""
        candidates = []

        if entity_type == EntityType.PARK:
            parks = Park.objects.filter(
                Q(name__icontains=query)
                | Q(slug__icontains=query.lower().replace(" ", "-"))
                | Q(former_names__icontains=query)
            )[: self.MAX_CANDIDATES]

            for park in parks:
                candidates.append(
                    {
                        "entity_type": EntityType.PARK,
                        "entity": park,
                        "name": park.name,
                        "slug": park.slug,
                        "search_names": [park.name],
                        "url": getattr(park, "get_absolute_url", lambda: None)(),
                        "priority_boost": 0.1,  # Parks get priority
                    }
                )

        elif entity_type == EntityType.RIDE:
            rides = Ride.objects.select_related("park").filter(
                Q(name__icontains=query)
                | Q(slug__icontains=query.lower().replace(" ", "-"))
                | Q(former_names__icontains=query)
                | Q(park__name__icontains=query)
            )[: self.MAX_CANDIDATES]

            for ride in rides:
                candidates.append(
                    {
                        "entity_type": EntityType.RIDE,
                        "entity": ride,
                        "name": ride.name,
                        "slug": ride.slug,
                        "search_names": [ride.name, f"{ride.park.name} {ride.name}"],
                        "url": getattr(ride, "get_absolute_url", lambda: None)(),
                        "priority_boost": 0.05,  # Rides get some priority
                    }
                )

        elif entity_type == EntityType.COMPANY:
            companies = Company.objects.filter(
                Q(name__icontains=query)
                | Q(slug__icontains=query.lower().replace(" ", "-"))
            )[: self.MAX_CANDIDATES]

            for company in companies:
                candidates.append(
                    {
                        "entity_type": EntityType.COMPANY,
                        "entity": company,
                        "name": company.name,
                        "slug": company.slug,
                        "search_names": [company.name],
                        "url": getattr(company, "get_absolute_url", lambda: None)(),
                        "priority_boost": 0.0,  # Companies get no priority boost
                    }
                )

        return candidates

    def _score_and_rank_candidates(
        self, query: str, candidates: List[Dict[str, Any]]
    ) -> List[FuzzyMatchResult]:
        """Score and rank all candidates using multiple algorithms."""
        scored_matches = []

        for candidate in candidates:
            best_score = 0.0
            best_reason = ""

            # Test against all search names for this candidate
            for search_name in candidate["search_names"]:
                # Algorithm 1: Sequence similarity
                similarity_score = self.algorithms.similarity_ratio(query, search_name)
                if similarity_score > best_score:
                    best_score = similarity_score
                    best_reason = f"Text similarity with '{search_name}'"

                # Algorithm 2: Partial matching
                partial_score = self.algorithms.partial_match_score(query, search_name)
                if partial_score > best_score:
                    best_score = partial_score
                    best_reason = f"Partial match with '{search_name}'"

                # Algorithm 3: Levenshtein distance
                if len(query) > 3 and len(search_name) > 3:
                    max_len = max(len(query), len(search_name))
                    distance = self.algorithms.levenshtein_distance(query, search_name)
                    lev_score = 1.0 - (distance / max_len)
                    if lev_score > best_score:
                        best_score = lev_score
                        best_reason = f"Similar spelling to '{search_name}'"

                # Algorithm 4: Soundex phonetic matching
                if len(query) > 2 and len(search_name) > 2:
                    query_soundex = self.algorithms.soundex(query)
                    name_soundex = self.algorithms.soundex(search_name)
                    if query_soundex == name_soundex and best_score < 0.7:
                        best_score = max(best_score, 0.7)
                        best_reason = f"Sounds like '{search_name}'"

            # Apply priority boost
            best_score += candidate["priority_boost"]
            best_score = min(1.0, best_score)  # Cap at 1.0

            # Determine confidence level
            if best_score >= self.HIGH_CONFIDENCE_THRESHOLD:
                confidence = "high"
            elif best_score >= self.MEDIUM_CONFIDENCE_THRESHOLD:
                confidence = "medium"
            else:
                confidence = "low"

            # Only include if above minimum threshold
            if best_score >= self.LOW_CONFIDENCE_THRESHOLD:
                match = FuzzyMatchResult(
                    entity_type=candidate["entity_type"],
                    entity=candidate["entity"],
                    name=candidate["name"],
                    slug=candidate["slug"],
                    score=best_score,
                    match_reason=best_reason,
                    confidence=confidence,
                    url=candidate["url"],
                )
                scored_matches.append(match)

        # Sort by score (highest first) and return
        return sorted(scored_matches, key=lambda x: x.score, reverse=True)

    def _generate_entity_suggestion(
        self, query: str, entity_types: List[EntityType], user
    ) -> EntitySuggestion:
        """Generate suggestion for creating new entity when no matches found."""

        # Determine most likely entity type based on query characteristics
        suggested_type = EntityType.PARK  # Default to park

        # Simple heuristics for entity type detection
        query_lower = query.lower()
        if any(
            word in query_lower
            for word in ["roller coaster", "ride", "coaster", "attraction"]
        ):
            suggested_type = EntityType.RIDE
        elif any(
            word in query_lower for word in ["inc", "corp", "company", "manufacturer"]
        ):
            suggested_type = EntityType.COMPANY
        elif EntityType.PARK in entity_types:
            suggested_type = EntityType.PARK
        elif entity_types:
            suggested_type = entity_types[0]

        # Clean up the suggested name
        suggested_name = " ".join(word.capitalize() for word in query.split())

        # Check if user is authenticated
        is_authenticated = (
            user and hasattr(user, "is_authenticated") and user.is_authenticated
        )

        # Generate appropriate prompts
        entity_name = suggested_type.value
        login_prompt = (
            f"Log in to suggest adding '{suggested_name}' as a new {entity_name}"
        )
        signup_prompt = (
            f"Sign up to contribute and add '{suggested_name}' to ThrillWiki"
        )
        creation_hint = (
            f"Help expand ThrillWiki by adding information about '{suggested_name}'"
        )

        return EntitySuggestion(
            suggested_name=suggested_name,
            entity_type=suggested_type,
            requires_authentication=not is_authenticated,
            login_prompt=login_prompt,
            signup_prompt=signup_prompt,
            creation_hint=creation_hint,
        )


# Global service instance
entity_fuzzy_matcher = EntityFuzzyMatcher()
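A sketch of how a search endpoint might consume this service, using only the API defined above; the view wiring itself is hypothetical.

# Hypothetical caller of the fuzzy matcher, e.g. inside a search view.
from apps.core.services.entity_fuzzy_matching import (
    EntityType,
    entity_fuzzy_matcher,
)

def search_entities(query: str, user=None) -> dict:
    matches, suggestion = entity_fuzzy_matcher.find_entity(
        query, entity_types=[EntityType.PARK, EntityType.RIDE], user=user
    )
    payload = {"matches": [m.to_dict() for m in matches]}
    if suggestion:
        # No confident match: surface the "add new entity" prompt,
        # gated on the user's authentication state.
        payload["suggestion"] = {
            "name": suggestion.suggested_name,
            "type": suggestion.entity_type.value,
            "requires_authentication": suggestion.requires_authentication,
            "login_prompt": suggestion.login_prompt,
        }
    return payload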
594
backend/apps/core/services/trending_service.py
Normal file
@@ -0,0 +1,594 @@
|
||||
"""
|
||||
Trending Service for calculating and caching trending content.
|
||||
|
||||
This service implements the weighted trending algorithm that combines:
|
||||
- View growth rates
|
||||
- Content ratings
|
||||
- Recency factors
|
||||
- Popularity metrics
|
||||
|
||||
Results are cached in Redis for performance optimization.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, List, Any
|
||||
from django.utils import timezone
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.core.cache import cache
|
||||
from django.db.models import Q
|
||||
|
||||
from apps.core.analytics import PageView
|
||||
from apps.parks.models import Park
|
||||
from apps.rides.models import Ride
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TrendingService:
|
||||
"""
|
||||
Service for calculating trending content using weighted algorithm.
|
||||
|
||||
Algorithm Components:
|
||||
- View Growth Rate (40% weight): Recent view increase vs historical
|
||||
- Rating Score (30% weight): Average user rating normalized
|
||||
- Recency Factor (20% weight): How recently content was added/updated
|
||||
- Popularity Boost (10% weight): Total view count normalization
|
||||
"""
|
||||
|
||||
# Algorithm weights (must sum to 1.0)
|
||||
WEIGHT_VIEW_GROWTH = 0.4
|
||||
WEIGHT_RATING = 0.3
|
||||
WEIGHT_RECENCY = 0.2
|
||||
WEIGHT_POPULARITY = 0.1
|
||||
|
||||
# Cache configuration
|
||||
CACHE_PREFIX = "trending"
|
||||
CACHE_TTL = 86400 # 24 hours (daily refresh)
|
||||
|
||||
# Time windows for calculations
|
||||
CURRENT_PERIOD_HOURS = 168 # 7 days
|
||||
PREVIOUS_PERIOD_HOURS = 336 # 14 days (for previous 7-day window comparison)
|
||||
RECENCY_BASELINE_DAYS = 365
|
||||
|
||||
def __init__(self):
|
||||
self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
|
||||
|
||||
def get_trending_content(
|
||||
self, content_type: str = "all", limit: int = 20, force_refresh: bool = False
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get trending content with caching.
|
||||
|
||||
Args:
|
||||
content_type: 'parks', 'rides', or 'all'
|
||||
limit: Maximum number of results
|
||||
force_refresh: Skip cache and recalculate
|
||||
|
||||
Returns:
|
||||
List of trending content with exact frontend format
|
||||
"""
|
||||
cache_key = f"{self.CACHE_PREFIX}:trending:{content_type}:{limit}"
|
||||
|
||||
if not force_refresh:
|
||||
cached_result = cache.get(cache_key)
|
||||
if cached_result is not None:
|
||||
self.logger.debug(
|
||||
f"Returning cached trending results for {content_type}"
|
||||
)
|
||||
return cached_result
|
||||
|
||||
self.logger.info(f"Calculating trending content for {content_type}")
|
||||
|
||||
try:
|
||||
# Calculate trending scores for each content type
|
||||
trending_items = []
|
||||
|
||||
if content_type in ["all", "parks"]:
|
||||
park_items = self._calculate_trending_parks(
|
||||
limit if content_type == "parks" else limit * 2
|
||||
)
|
||||
trending_items.extend(park_items)
|
||||
|
||||
if content_type in ["all", "rides"]:
|
||||
ride_items = self._calculate_trending_rides(
|
||||
limit if content_type == "rides" else limit * 2
|
||||
)
|
||||
trending_items.extend(ride_items)
|
||||
|
||||
# Sort by trending score and apply limit
|
||||
trending_items.sort(key=lambda x: x.get("trending_score", 0), reverse=True)
|
||||
trending_items = trending_items[:limit]
|
||||
|
||||
# Add ranking and format for frontend
|
||||
formatted_results = self._format_trending_results(trending_items)
|
||||
|
||||
# Cache results
|
||||
cache.set(cache_key, formatted_results, self.CACHE_TTL)
|
||||
|
||||
self.logger.info(
|
||||
f"Calculated {len(formatted_results)} trending items for {content_type}"
|
||||
)
|
||||
return formatted_results
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error calculating trending content: {e}", exc_info=True)
|
||||
return []
|
||||
|
||||
def get_new_content(
|
||||
self,
|
||||
content_type: str = "all",
|
||||
limit: int = 20,
|
||||
days_back: int = 30,
|
||||
force_refresh: bool = False,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get recently added content.
|
||||
|
||||
Args:
|
||||
content_type: 'parks', 'rides', or 'all'
|
||||
limit: Maximum number of results
|
||||
days_back: How many days to look back
|
||||
force_refresh: Skip cache and recalculate
|
||||
|
||||
Returns:
|
||||
List of new content with exact frontend format
|
||||
"""
|
||||
cache_key = f"{self.CACHE_PREFIX}:new:{content_type}:{limit}:{days_back}"
|
||||
|
||||
if not force_refresh:
|
||||
cached_result = cache.get(cache_key)
|
||||
if cached_result is not None:
|
||||
self.logger.debug(
|
||||
f"Returning cached new content results for {content_type}"
|
||||
)
|
||||
return cached_result
|
||||
|
||||
self.logger.info(f"Calculating new content for {content_type}")
|
||||
|
||||
try:
|
||||
cutoff_date = timezone.now() - timedelta(days=days_back)
|
||||
new_items = []
|
||||
|
||||
if content_type in ["all", "parks"]:
|
||||
parks = self._get_new_parks(
|
||||
cutoff_date, limit if content_type == "parks" else limit * 2
|
||||
)
|
||||
new_items.extend(parks)
|
||||
|
||||
if content_type in ["all", "rides"]:
|
||||
rides = self._get_new_rides(
|
||||
cutoff_date, limit if content_type == "rides" else limit * 2
|
||||
)
|
||||
new_items.extend(rides)
|
||||
|
||||
# Sort by date added (most recent first) and apply limit
|
||||
new_items.sort(key=lambda x: x.get("date_added", ""), reverse=True)
|
||||
new_items = new_items[:limit]
|
||||
|
||||
# Format for frontend
|
||||
formatted_results = self._format_new_content_results(new_items)
|
||||
|
||||
# Cache results
|
||||
cache.set(cache_key, formatted_results, self.CACHE_TTL)
|
||||
|
||||
self.logger.info(
|
||||
f"Found {len(formatted_results)} new items for {content_type}"
|
||||
)
|
||||
return formatted_results
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error getting new content: {e}", exc_info=True)
|
||||
return []
|
||||
|
||||
def _calculate_trending_parks(self, limit: int) -> List[Dict[str, Any]]:
|
||||
"""Calculate trending scores for parks."""
|
||||
parks = Park.objects.filter(status="OPERATING").select_related(
|
||||
"location", "operator"
|
||||
)
|
||||
|
||||
trending_parks = []
|
||||
|
||||
for park in parks:
|
||||
try:
|
||||
score = self._calculate_content_score(park, "park")
|
||||
if score > 0: # Only include items with positive trending scores
|
||||
trending_parks.append(
|
||||
{
|
||||
"content_object": park,
|
||||
"content_type": "park",
|
||||
"trending_score": score,
|
||||
"id": park.id,
|
||||
"name": park.name,
|
||||
"slug": park.slug,
|
||||
"location": (
|
||||
park.formatted_location
|
||||
if hasattr(park, "location")
|
||||
else ""
|
||||
),
|
||||
"category": "park",
|
||||
"rating": (
|
||||
float(park.average_rating)
|
||||
if park.average_rating
|
||||
else 0.0
|
||||
),
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Error calculating score for park {park.id}: {e}")
|
||||
|
||||
return trending_parks
|
||||
|
||||
def _calculate_trending_rides(self, limit: int) -> List[Dict[str, Any]]:
|
||||
"""Calculate trending scores for rides."""
|
||||
rides = Ride.objects.filter(status="OPERATING").select_related(
|
||||
"park", "park__location"
|
||||
)
|
||||
|
||||
trending_rides = []
|
||||
|
||||
for ride in rides:
|
||||
try:
|
||||
score = self._calculate_content_score(ride, "ride")
|
||||
if score > 0: # Only include items with positive trending scores
|
||||
# Get location from park (rides don't have direct location field)
|
||||
location = ""
|
||||
if (
|
||||
ride.park
|
||||
and hasattr(ride.park, "location")
|
||||
and ride.park.location
|
||||
):
|
||||
location = ride.park.formatted_location
|
||||
|
||||
trending_rides.append(
|
||||
{
|
||||
"content_object": ride,
|
||||
"content_type": "ride",
|
||||
"trending_score": score,
|
||||
"id": ride.pk, # Use pk instead of id
|
||||
"name": ride.name,
|
||||
"slug": ride.slug,
|
||||
"location": location,
|
||||
"category": "ride",
|
||||
"rating": (
|
||||
float(ride.average_rating)
|
||||
if ride.average_rating
|
||||
else 0.0
|
||||
),
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Error calculating score for ride {ride.pk}: {e}")
|
||||
|
||||
return trending_rides
|
||||
|
||||
    def _calculate_content_score(self, content_obj: Any, content_type: str) -> float:
        """
        Calculate weighted trending score for content object.

        Returns:
            Float between 0.0 and 1.0 representing trending strength
        """
        try:
            # Get content type for PageView queries
            ct = ContentType.objects.get_for_model(content_obj)

            # 1. View Growth Score (40% weight)
            view_growth_score = self._calculate_view_growth_score(ct, content_obj.id)

            # 2. Rating Score (30% weight)
            rating_score = self._calculate_rating_score(content_obj)

            # 3. Recency Score (20% weight)
            recency_score = self._calculate_recency_score(content_obj)

            # 4. Popularity Score (10% weight)
            popularity_score = self._calculate_popularity_score(ct, content_obj.id)

            # Calculate weighted final score
            final_score = (
                view_growth_score * self.WEIGHT_VIEW_GROWTH
                + rating_score * self.WEIGHT_RATING
                + recency_score * self.WEIGHT_RECENCY
                + popularity_score * self.WEIGHT_POPULARITY
            )

            self.logger.debug(
                f"{content_type} {content_obj.id}: "
                f"growth={view_growth_score:.3f}, rating={rating_score:.3f}, "
                f"recency={recency_score:.3f}, popularity={popularity_score:.3f}, "
                f"final={final_score:.3f}"
            )

            return final_score

        except Exception as e:
            self.logger.error(
                f"Error calculating score for {content_type} {content_obj.id}: {e}"
            )
            return 0.0

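The weighted blend above can be sanity-checked in isolation. Below is a minimal standalone sketch, assuming the class weight constants equal the percentages in the numbered comments (0.40/0.30/0.20/0.10); it is an illustration, not the service's actual code path.

WEIGHTS = {"growth": 0.40, "rating": 0.30, "recency": 0.20, "popularity": 0.10}

def blend(scores: dict) -> float:
    # Each component is normalized to [0, 1], so the blend also stays in [0, 1].
    return sum(scores[k] * w for k, w in WEIGHTS.items())

# Example: strong growth, decent rating, fairly new, modest traffic -> 0.75
print(blend({"growth": 0.9, "rating": 0.7, "recency": 0.8, "popularity": 0.2}))
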
    def _calculate_view_growth_score(
        self, content_type: ContentType, object_id: int
    ) -> float:
        """Calculate normalized view growth score."""
        try:
            current_views, previous_views, growth_percentage = (
                PageView.get_views_growth(
                    content_type,
                    object_id,
                    self.CURRENT_PERIOD_HOURS,
                    self.PREVIOUS_PERIOD_HOURS,
                )
            )

            if previous_views == 0:
                # New content with views gets a boost
                return min(current_views / 100.0, 1.0) if current_views > 0 else 0.0

            # Normalize growth percentage to a 0-1 scale
            # 100% growth = 0.2, 500% growth = 1.0 (capped)
            normalized_growth = (
                min(growth_percentage / 500.0, 1.0) if growth_percentage > 0 else 0.0
            )
            return max(normalized_growth, 0.0)

        except Exception as e:
            self.logger.warning(f"Error calculating view growth: {e}")
            return 0.0

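A quick standalone check of the normalization above, assuming PageView.get_views_growth reports growth as a percentage change (the call is replaced with literal counts here):

def growth_score(current: int, previous: int) -> float:
    # Mirrors the branches above; numbers are illustrative.
    if previous == 0:
        return min(current / 100.0, 1.0) if current > 0 else 0.0
    growth_pct = (current - previous) / previous * 100.0
    return min(growth_pct / 500.0, 1.0) if growth_pct > 0 else 0.0

print(growth_score(50, 0))     # 0.5 - new-content boost
print(growth_score(300, 100))  # 0.4 - 200% growth
print(growth_score(700, 100))  # 1.0 - capped at 500% growth
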
    def _calculate_rating_score(self, content_obj: Any) -> float:
        """Calculate normalized rating score."""
        try:
            rating = getattr(content_obj, "average_rating", None)
            if rating is None or rating == 0:
                return 0.3  # Neutral score for unrated content

            # Normalize rating from a 1-10 scale to a 0-1 scale
            # Rating of 5 = 0.44, rating of 8 = 0.78, rating of 10 = 1.0
            return min(max((float(rating) - 1) / 9.0, 0.0), 1.0)

        except Exception as e:
            self.logger.warning(f"Error calculating rating score: {e}")
            return 0.3

    def _calculate_recency_score(self, content_obj: Any) -> float:
        """Calculate recency score based on when content was added/updated."""
        try:
            # Use opening_date for parks/rides, or created_at as fallback
            date_added = getattr(content_obj, "opening_date", None)
            if not date_added:
                date_added = getattr(content_obj, "created_at", None)
            if not date_added:
                return 0.5  # Neutral score for unknown dates

            # Handle both date and datetime objects
            if hasattr(date_added, "date"):
                date_added = date_added.date()

            # Calculate days since added
            today = timezone.now().date()
            days_since_added = (today - date_added).days

            # Recency score: newer content gets higher scores
            # 0 days = 1.0, 30 days = 0.8, 365 days = 0.1, >365 days = 0.0
            if days_since_added <= 0:
                return 1.0
            elif days_since_added <= 30:
                return 1.0 - (days_since_added / 30.0) * 0.2  # 1.0 to 0.8
            elif days_since_added <= self.RECENCY_BASELINE_DAYS:
                return (
                    0.8
                    - ((days_since_added - 30) / (self.RECENCY_BASELINE_DAYS - 30))
                    * 0.7
                )  # 0.8 to 0.1
            else:
                return 0.0

        except Exception as e:
            self.logger.warning(f"Error calculating recency score: {e}")
            return 0.5

    def _calculate_popularity_score(
        self, content_type: ContentType, object_id: int
    ) -> float:
        """Calculate popularity score based on total view count."""
        try:
            total_views = PageView.get_total_views_count(
                content_type, object_id, hours=168  # Last 7 days
            )

            # Normalize views to a 0-1 scale
            # 0 views = 0.0, 100 views = 0.5, 1000+ views = 1.0
            if total_views == 0:
                return 0.0
            elif total_views <= 100:
                return total_views / 200.0  # 0.0 to 0.5
            else:
                return min(0.5 + (total_views - 100) / 1800.0, 1.0)  # 0.5 to 1.0

        except Exception as e:
            self.logger.warning(f"Error calculating popularity score: {e}")
            return 0.0

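The piecewise normalization above is easy to verify by hand; a standalone sketch with illustrative view counts:

def popularity_score(total_views: int) -> float:
    if total_views == 0:
        return 0.0
    if total_views <= 100:
        return total_views / 200.0  # linear from 0.0 to 0.5
    return min(0.5 + (total_views - 100) / 1800.0, 1.0)  # linear from 0.5 to 1.0

print(popularity_score(50))    # 0.25
print(popularity_score(100))   # 0.5
print(popularity_score(1000))  # 1.0 - cap reached at 1000 views
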
    def _get_new_parks(self, cutoff_date: datetime, limit: int) -> List[Dict[str, Any]]:
        """Get recently added parks."""
        new_parks = (
            Park.objects.filter(
                Q(created_at__gte=cutoff_date)
                | Q(opening_date__gte=cutoff_date.date()),
                status="OPERATING",
            )
            .select_related("location", "operator")
            .order_by("-created_at", "-opening_date")[:limit]
        )

        results = []
        for park in new_parks:
            date_added = park.opening_date or park.created_at
            # Normalize datetimes to dates; plain dates pass through unchanged
            if date_added and isinstance(date_added, datetime):
                date_added = date_added.date()

            results.append(
                {
                    "content_object": park,
                    "content_type": "park",
                    "id": park.pk,  # Use pk instead of id for Django compatibility
                    "name": park.name,
                    "slug": park.slug,
                    "location": (
                        park.formatted_location if hasattr(park, "location") else ""
                    ),
                    "category": "park",
                    "date_added": date_added.isoformat() if date_added else "",
                }
            )

        return results

    def _get_new_rides(self, cutoff_date: datetime, limit: int) -> List[Dict[str, Any]]:
        """Get recently added rides."""
        new_rides = (
            Ride.objects.filter(
                Q(created_at__gte=cutoff_date)
                | Q(opening_date__gte=cutoff_date.date()),
                status="OPERATING",
            )
            .select_related("park", "park__location")
            .order_by("-created_at", "-opening_date")[:limit]
        )

        results = []
        for ride in new_rides:
            date_added = getattr(ride, "opening_date", None) or getattr(
                ride, "created_at", None
            )
            # Normalize datetimes to dates; plain dates pass through unchanged
            if date_added and isinstance(date_added, datetime):
                date_added = date_added.date()

            # Get location from park (rides don't have a direct location field)
            location = ""
            if ride.park and hasattr(ride.park, "location") and ride.park.location:
                location = ride.park.formatted_location

            results.append(
                {
                    "content_object": ride,
                    "content_type": "ride",
                    "id": ride.pk,  # Use pk instead of id for Django compatibility
                    "name": ride.name,
                    "slug": ride.slug,
                    "location": location,
                    "category": "ride",
                    "date_added": date_added.isoformat() if date_added else "",
                }
            )

        return results

    def _format_trending_results(
        self, trending_items: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format trending results for frontend consumption."""
        formatted_results = []

        for rank, item in enumerate(trending_items, 1):
            try:
                # Get the view change for display
                content_obj = item["content_object"]
                ct = ContentType.objects.get_for_model(content_obj)
                current_views, previous_views, growth_percentage = (
                    PageView.get_views_growth(
                        ct,
                        content_obj.id,
                        self.CURRENT_PERIOD_HOURS,
                        self.PREVIOUS_PERIOD_HOURS,
                    )
                )

                # Format exactly as the frontend expects
                formatted_item = {
                    "id": item["id"],
                    "name": item["name"],
                    "location": item["location"],
                    "category": item["category"],
                    "rating": item["rating"],
                    "rank": rank,
                    "views": current_views,
                    "views_change": (
                        f"+{growth_percentage:.1f}%"
                        if growth_percentage > 0
                        else f"{growth_percentage:.1f}%"
                    ),
                    "slug": item["slug"],
                }

                formatted_results.append(formatted_item)

            except Exception as e:
                self.logger.warning(f"Error formatting trending item: {e}")

        return formatted_results

    def _format_new_content_results(
        self, new_items: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format new content results for frontend consumption."""
        formatted_results = []

        for item in new_items:
            try:
                # Format exactly as the frontend expects
                formatted_item = {
                    "id": item["id"],
                    "name": item["name"],
                    "location": item["location"],
                    "category": item["category"],
                    "date_added": item["date_added"],
                    "slug": item["slug"],
                }

                formatted_results.append(formatted_item)

            except Exception as e:
                self.logger.warning(f"Error formatting new content item: {e}")

        return formatted_results

    def clear_cache(self, content_type: str = "all") -> None:
        """Clear trending and new content caches."""
        try:
            cache_patterns = [
                f"{self.CACHE_PREFIX}:trending:{content_type}:*",
                f"{self.CACHE_PREFIX}:new:{content_type}:*",
            ]

            if content_type == "all":
                cache_patterns.extend(
                    [
                        f"{self.CACHE_PREFIX}:trending:parks:*",
                        f"{self.CACHE_PREFIX}:trending:rides:*",
                        f"{self.CACHE_PREFIX}:new:parks:*",
                        f"{self.CACHE_PREFIX}:new:rides:*",
                    ]
                )

            # Note: this is a simplified cache clear; cache_patterns above is
            # currently only documentation of the affected keys. In production
            # you would resolve the patterns to keys and use cache.delete_many()
            # or similar instead of flushing the whole cache.
            cache.clear()
            self.logger.info(f"Cleared trending caches for {content_type}")

        except Exception as e:
            self.logger.error(f"Error clearing cache: {e}")


# Singleton service instance
trending_service = TrendingService()

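A hedged usage sketch for the singleton. Only clear_cache() appears in this hunk, so the getter and module path below are assumptions for illustration:

from apps.core.services.trending import trending_service  # assumed module path

trending_service.clear_cache("parks")            # drop stale park caches
items = trending_service.get_trending_content()  # hypothetical getter name
for item in items[:3]:
    print(item["rank"], item["name"], item["views_change"])
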
backend/apps/core/urls.py (new file, 24 lines)
@@ -0,0 +1,24 @@
"""
|
||||
Core app URL configuration.
|
||||
"""
|
||||
|
||||
from django.urls import path, include
|
||||
from .views.entity_search import (
|
||||
EntityFuzzySearchView,
|
||||
EntityNotFoundView,
|
||||
QuickEntitySuggestionView,
|
||||
)
|
||||
|
||||
app_name = 'core'
|
||||
|
||||
# Entity search endpoints
|
||||
entity_patterns = [
|
||||
path('search/', EntityFuzzySearchView.as_view(), name='entity_fuzzy_search'),
|
||||
path('not-found/', EntityNotFoundView.as_view(), name='entity_not_found'),
|
||||
path('suggestions/', QuickEntitySuggestionView.as_view(), name='entity_suggestions'),
|
||||
]
|
||||
|
||||
urlpatterns = [
|
||||
# Entity fuzzy matching and search endpoints
|
||||
path('entities/', include(entity_patterns)),
|
||||
]
|
||||
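These routes can be resolved by name through the 'core' namespace; a small sketch (the project-level prefix under which this module is included is not shown in this hunk):

from django.urls import reverse

reverse('core:entity_fuzzy_search')  # -> .../entities/search/
reverse('core:entity_not_found')     # -> .../entities/not-found/
reverse('core:entity_suggestions')   # -> .../entities/suggestions/
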
backend/apps/core/urls/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# URLs package for core app

backend/apps/core/views/entity_search.py (new file, 347 lines)
@@ -0,0 +1,347 @@
"""
|
||||
Entity search views with fuzzy matching and authentication prompts.
|
||||
"""
|
||||
|
||||
from rest_framework.views import APIView
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
from rest_framework.permissions import AllowAny
|
||||
from django.views.decorators.csrf import csrf_exempt
|
||||
from django.utils.decorators import method_decorator
|
||||
from typing import Optional, List
|
||||
|
||||
from ..services.entity_fuzzy_matching import (
|
||||
entity_fuzzy_matcher,
|
||||
EntityType,
|
||||
)
|
||||
|
||||
|
||||
class EntityFuzzySearchView(APIView):
    """
    API endpoint for fuzzy entity search with authentication prompts.

    Handles entity lookup failures by providing intelligent suggestions and
    authentication prompts for entity creation.
    """

    permission_classes = [AllowAny]  # Allow both authenticated and anonymous users

    def post(self, request):
        """
        Perform fuzzy entity search.

        Request body:
        {
            "query": "entity name to search",
            "entity_types": ["park", "ride", "company"],  // optional
            "include_suggestions": true  // optional, default true
        }

        Response:
        {
            "success": true,
            "query": "original query",
            "matches": [
                {
                    "entity_type": "park",
                    "name": "Cedar Point",
                    "slug": "cedar-point",
                    "score": 0.95,
                    "confidence": "high",
                    "match_reason": "Text similarity with 'Cedar Point'",
                    "url": "/parks/cedar-point/",
                    "entity_id": 123
                }
            ],
            "suggestion": {
                "suggested_name": "New Entity Name",
                "entity_type": "park",
                "requires_authentication": true,
                "login_prompt": "Log in to suggest adding...",
                "signup_prompt": "Sign up to contribute...",
                "creation_hint": "Help expand ThrillWiki..."
            },
            "user_authenticated": false
        }
        """
        try:
            # Parse request data
            query = request.data.get("query", "").strip()
            entity_types_raw = request.data.get(
                "entity_types", ["park", "ride", "company"]
            )
            include_suggestions = request.data.get("include_suggestions", True)

            # Validate query
            if not query or len(query) < 2:
                return Response(
                    {
                        "success": False,
                        "error": "Query must be at least 2 characters long",
                        "code": "INVALID_QUERY",
                    },
                    status=status.HTTP_400_BAD_REQUEST,
                )

            # Parse and validate entity types
            entity_types = []
            valid_types = {"park", "ride", "company"}

            for entity_type in entity_types_raw:
                if entity_type in valid_types:
                    entity_types.append(EntityType(entity_type))

            if not entity_types:
                entity_types = [EntityType.PARK, EntityType.RIDE, EntityType.COMPANY]

            # Perform fuzzy matching
            matches, suggestion = entity_fuzzy_matcher.find_entity(
                query=query, entity_types=entity_types, user=request.user
            )

            # Format response
            response_data = {
                "success": True,
                "query": query,
                "matches": [match.to_dict() for match in matches],
                "user_authenticated": (
                    request.user.is_authenticated
                    if hasattr(request.user, "is_authenticated")
                    else False
                ),
            }

            # Include suggestion if requested and available
            if include_suggestions and suggestion:
                response_data["suggestion"] = {
                    "suggested_name": suggestion.suggested_name,
                    "entity_type": suggestion.entity_type.value,
                    "requires_authentication": suggestion.requires_authentication,
                    "login_prompt": suggestion.login_prompt,
                    "signup_prompt": suggestion.signup_prompt,
                    "creation_hint": suggestion.creation_hint,
                }

            return Response(response_data, status=status.HTTP_200_OK)

        except Exception as e:
            return Response(
                {
                    "success": False,
                    "error": f"Internal server error: {str(e)}",
                    "code": "INTERNAL_ERROR",
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )


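A request sketch using DRF's test client, assuming the entity patterns are mounted at the site root (adjust the path to the project's actual prefix); the misspelled query value is illustrative:

from rest_framework.test import APIClient

client = APIClient()
resp = client.post(
    "/entities/search/",
    {"query": "cedar pont", "entity_types": ["park"]},
    format="json",
)
print(resp.json()["matches"])  # fuzzy matches for the misspelled park name
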
class EntityNotFoundView(APIView):
    """
    Endpoint specifically for handling entity not found scenarios.

    This view is called when normal entity lookup fails and provides
    fuzzy matching suggestions along with authentication prompts.
    """

    permission_classes = [AllowAny]

    def post(self, request):
        """
        Handle entity not found with suggestions.

        Request body:
        {
            "original_query": "what user searched for",
            "attempted_slug": "slug-that-failed",  // optional
            "entity_type": "park",  // optional, inferred from context
            "context": {  // optional context information
                "park_slug": "park-slug-if-searching-for-ride",
                "source_page": "page where search originated"
            }
        }
        """
        try:
            original_query = request.data.get("original_query", "").strip()
            attempted_slug = request.data.get("attempted_slug", "")
            entity_type_hint = request.data.get("entity_type")
            context = request.data.get("context", {})

            if not original_query:
                return Response(
                    {
                        "success": False,
                        "error": "original_query is required",
                        "code": "MISSING_QUERY",
                    },
                    status=status.HTTP_400_BAD_REQUEST,
                )

            # Determine entity types to search based on context
            entity_types = []
            if entity_type_hint:
                try:
                    entity_types = [EntityType(entity_type_hint)]
                except ValueError:
                    pass

            # If we have park context, prioritize ride searches
            if context.get("park_slug") and not entity_types:
                entity_types = [EntityType.RIDE, EntityType.PARK]

            # Default to all types if not specified
            if not entity_types:
                entity_types = [EntityType.PARK, EntityType.RIDE, EntityType.COMPANY]

            # Try fuzzy matching on the original query
            matches, suggestion = entity_fuzzy_matcher.find_entity(
                query=original_query, entity_types=entity_types, user=request.user
            )

            # If no matches on the original query, try the attempted slug
            if not matches and attempted_slug:
                # Convert the slug back to a readable name for fuzzy matching
                slug_as_name = attempted_slug.replace("-", " ").title()
                matches, suggestion = entity_fuzzy_matcher.find_entity(
                    query=slug_as_name, entity_types=entity_types, user=request.user
                )

            # Prepare the response with detailed context
            response_data = {
                "success": True,
                "original_query": original_query,
                "attempted_slug": attempted_slug,
                "context": context,
                "matches": [match.to_dict() for match in matches],
                "user_authenticated": (
                    request.user.is_authenticated
                    if hasattr(request.user, "is_authenticated")
                    else False
                ),
                "has_matches": len(matches) > 0,
            }

            # Always include a suggestion for entity not found scenarios
            if suggestion:
                response_data["suggestion"] = {
                    "suggested_name": suggestion.suggested_name,
                    "entity_type": suggestion.entity_type.value,
                    "requires_authentication": suggestion.requires_authentication,
                    "login_prompt": suggestion.login_prompt,
                    "signup_prompt": suggestion.signup_prompt,
                    "creation_hint": suggestion.creation_hint,
                }

            return Response(response_data, status=status.HTTP_200_OK)

        except Exception as e:
            return Response(
                {
                    "success": False,
                    "error": f"Internal server error: {str(e)}",
                    "code": "INTERNAL_ERROR",
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )


@method_decorator(csrf_exempt, name="dispatch")
class QuickEntitySuggestionView(APIView):
    """
    Lightweight endpoint for quick entity suggestions (e.g., autocomplete).
    """

    permission_classes = [AllowAny]

    def get(self, request):
        """
        Get quick entity suggestions.

        Query parameters:
        - q: query string
        - types: comma-separated entity types (park,ride,company)
        - limit: max results (default 5)
        """
        try:
            query = request.GET.get("q", "").strip()
            types_param = request.GET.get("types", "park,ride,company")
            limit = min(int(request.GET.get("limit", 5)), 10)  # Cap at 10

            if not query or len(query) < 2:
                return Response(
                    {"suggestions": [], "query": query}, status=status.HTTP_200_OK
                )

            # Parse entity types
            entity_types = []
            for type_str in types_param.split(","):
                type_str = type_str.strip()
                if type_str in ["park", "ride", "company"]:
                    entity_types.append(EntityType(type_str))

            if not entity_types:
                entity_types = [EntityType.PARK, EntityType.RIDE, EntityType.COMPANY]

            # Get fuzzy matches
            matches, _ = entity_fuzzy_matcher.find_entity(
                query=query, entity_types=entity_types, user=request.user
            )

            # Format as simple suggestions
            suggestions = []
            for match in matches[:limit]:
                suggestions.append(
                    {
                        "name": match.name,
                        "type": match.entity_type.value,
                        "slug": match.slug,
                        "url": match.url,
                        "score": match.score,
                        "confidence": match.confidence,
                    }
                )

            return Response(
                {"suggestions": suggestions, "query": query, "count": len(suggestions)},
                status=status.HTTP_200_OK,
            )

        except Exception as e:
            return Response(
                {"suggestions": [], "query": request.GET.get("q", ""), "error": str(e)},
                status=status.HTTP_200_OK,
            )  # Return 200 even on errors for autocomplete


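An autocomplete sketch against the GET endpoint, with the same mount-prefix assumption as above; query values are illustrative:

from rest_framework.test import APIClient

resp = APIClient().get(
    "/entities/suggestions/", {"q": "stee", "types": "ride", "limit": 3}
)
for s in resp.json()["suggestions"]:
    print(s["name"], s["confidence"], s["url"])
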
# Utility function for other views to use
def get_entity_suggestions(
    query: str, entity_types: Optional[List[str]] = None, user=None
):
    """
    Utility function for other Django views to get entity suggestions.

    Args:
        query: Search query
        entity_types: List of entity type strings
        user: Django user object

    Returns:
        Tuple of (matches, suggestion)
    """
    try:
        # Convert string types to EntityType enums
        parsed_types = []
        if entity_types:
            for entity_type in entity_types:
                try:
                    parsed_types.append(EntityType(entity_type))
                except ValueError:
                    continue

        if not parsed_types:
            parsed_types = [EntityType.PARK, EntityType.RIDE, EntityType.COMPANY]

        return entity_fuzzy_matcher.find_entity(
            query=query, entity_types=parsed_types, user=user
        )
    except Exception:
        return [], None

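A usage sketch for the utility above, e.g. from a detail view's not-found path (the import path is assumed from the file location; the query is illustrative):

from apps.core.views.entity_search import get_entity_suggestions  # assumed path

matches, suggestion = get_entity_suggestions("Kingda Kha", ["ride"], user=None)
if matches:
    best = matches[0]
    print(f"Did you mean {best.name}? ({best.url})")
elif suggestion:
    print(suggestion.creation_hint)
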
@@ -317,55 +317,55 @@ class PgHistoryEventsAdmin(admin.ModelAdmin):
    """Admin interface for pghistory Events"""

    list_display = (
        'pgh_id',
        'pgh_created_at',
        'pgh_label',
        'pgh_model',
        'pgh_obj_id',
        'pgh_context_display',
        "pgh_id",
        "pgh_created_at",
        "pgh_label",
        "pgh_model",
        "pgh_obj_id",
        "pgh_context_display",
    )
    list_filter = (
        'pgh_label',
        'pgh_model',
        'pgh_created_at',
        "pgh_label",
        "pgh_model",
        "pgh_created_at",
    )
    search_fields = (
        'pgh_obj_id',
        'pgh_context',
        "pgh_obj_id",
        "pgh_context",
    )
    readonly_fields = (
        'pgh_id',
        'pgh_created_at',
        'pgh_label',
        'pgh_model',
        'pgh_obj_id',
        'pgh_context',
        'pgh_data',
        "pgh_id",
        "pgh_created_at",
        "pgh_label",
        "pgh_model",
        "pgh_obj_id",
        "pgh_context",
        "pgh_data",
    )
    date_hierarchy = 'pgh_created_at'
    ordering = ('-pgh_created_at',)
    date_hierarchy = "pgh_created_at"
    ordering = ("-pgh_created_at",)

    fieldsets = (
        (
            'Event Information',
            "Event Information",
            {
                'fields': (
                    'pgh_id',
                    'pgh_created_at',
                    'pgh_label',
                    'pgh_model',
                    'pgh_obj_id',
                "fields": (
                    "pgh_id",
                    "pgh_created_at",
                    "pgh_label",
                    "pgh_model",
                    "pgh_obj_id",
                )
            },
        ),
        (
            'Context & Data',
            "Context & Data",
            {
                'fields': (
                    'pgh_context',
                    'pgh_data',
                "fields": (
                    "pgh_context",
                    "pgh_data",
                ),
                'classes': ('collapse',),
                "classes": ("collapse",),
            },
        ),
    )
@@ -392,7 +392,7 @@ class PgHistoryEventsAdmin(admin.ModelAdmin):

    def has_delete_permission(self, request, obj=None):
        """Prevent deletion of history events"""
        return getattr(request.user, 'is_superuser', False)
        return getattr(request.user, "is_superuser", False)


# Register the models with their admin classes

@@ -5,6 +5,7 @@ from .models.company import Company
from .models.rides import Ride, RideModel, RollerCoasterStats
from .models.location import RideLocation
from .models.reviews import RideReview
from .models.rankings import RideRanking, RidePairComparison, RankingSnapshot


class ManufacturerAdmin(admin.ModelAdmin):
@@ -484,4 +485,222 @@ class CompanyAdmin(admin.ModelAdmin):
        return ", ".join(obj.roles) if obj.roles else "No roles"


@admin.register(RideRanking)
class RideRankingAdmin(admin.ModelAdmin):
    """Admin interface for ride rankings"""

    list_display = (
        "rank",
        "ride_name",
        "park_name",
        "winning_percentage_display",
        "wins",
        "losses",
        "ties",
        "average_rating",
        "mutual_riders_count",
        "last_calculated",
    )
    list_filter = (
        "ride__category",
        "last_calculated",
        "calculation_version",
    )
    search_fields = (
        "ride__name",
        "ride__park__name",
    )
    readonly_fields = (
        "ride",
        "rank",
        "wins",
        "losses",
        "ties",
        "winning_percentage",
        "mutual_riders_count",
        "comparison_count",
        "average_rating",
        "last_calculated",
        "calculation_version",
        "total_comparisons",
    )
    ordering = ["rank"]

    fieldsets = (
        (
            "Ride Information",
            {"fields": ("ride",)},
        ),
        (
            "Ranking Metrics",
            {
                "fields": (
                    "rank",
                    "winning_percentage",
                    "wins",
                    "losses",
                    "ties",
                    "total_comparisons",
                )
            },
        ),
        (
            "Additional Metrics",
            {
                "fields": (
                    "average_rating",
                    "mutual_riders_count",
                    "comparison_count",
                )
            },
        ),
        (
            "Calculation Info",
            {
                "fields": (
                    "last_calculated",
                    "calculation_version",
                ),
                "classes": ("collapse",),
            },
        ),
    )

    @admin.display(description="Ride")
    def ride_name(self, obj):
        return obj.ride.name

    @admin.display(description="Park")
    def park_name(self, obj):
        return obj.ride.park.name

    @admin.display(description="Win %")
    def winning_percentage_display(self, obj):
        return f"{obj.winning_percentage:.1%}"

    def has_add_permission(self, request):
        # Rankings are calculated automatically
        return False

    def has_change_permission(self, request, obj=None):
        # Rankings are read-only
        return False


@admin.register(RidePairComparison)
class RidePairComparisonAdmin(admin.ModelAdmin):
    """Admin interface for ride pair comparisons"""

    list_display = (
        "comparison_summary",
        "ride_a_name",
        "ride_b_name",
        "winner_display",
        "ride_a_wins",
        "ride_b_wins",
        "ties",
        "mutual_riders_count",
        "last_calculated",
    )
    list_filter = ("last_calculated",)
    search_fields = (
        "ride_a__name",
        "ride_b__name",
        "ride_a__park__name",
        "ride_b__park__name",
    )
    readonly_fields = (
        "ride_a",
        "ride_b",
        "ride_a_wins",
        "ride_b_wins",
        "ties",
        "mutual_riders_count",
        "ride_a_avg_rating",
        "ride_b_avg_rating",
        "last_calculated",
        "winner",
        "is_tie",
    )
    ordering = ["-mutual_riders_count"]

    @admin.display(description="Comparison")
    def comparison_summary(self, obj):
        return f"{obj.ride_a.name} vs {obj.ride_b.name}"

    @admin.display(description="Ride A")
    def ride_a_name(self, obj):
        return obj.ride_a.name

    @admin.display(description="Ride B")
    def ride_b_name(self, obj):
        return obj.ride_b.name

    @admin.display(description="Winner")
    def winner_display(self, obj):
        if obj.is_tie:
            return "TIE"
        winner = obj.winner
        if winner:
            return winner.name
        return "N/A"

    def has_add_permission(self, request):
        # Comparisons are calculated automatically
        return False

    def has_change_permission(self, request, obj=None):
        # Comparisons are read-only
        return False


@admin.register(RankingSnapshot)
class RankingSnapshotAdmin(admin.ModelAdmin):
    """Admin interface for ranking history snapshots"""

    list_display = (
        "ride_name",
        "park_name",
        "rank",
        "winning_percentage_display",
        "snapshot_date",
    )
    list_filter = (
        "snapshot_date",
        "ride__category",
    )
    search_fields = (
        "ride__name",
        "ride__park__name",
    )
    readonly_fields = (
        "ride",
        "rank",
        "winning_percentage",
        "snapshot_date",
    )
    date_hierarchy = "snapshot_date"
    ordering = ["-snapshot_date", "rank"]

    @admin.display(description="Ride")
    def ride_name(self, obj):
        return obj.ride.name

    @admin.display(description="Park")
    def park_name(self, obj):
        return obj.ride.park.name

    @admin.display(description="Win %")
    def winning_percentage_display(self, obj):
        return f"{obj.winning_percentage:.1%}"

    def has_add_permission(self, request):
        # Snapshots are created automatically
        return False

    def has_change_permission(self, request, obj=None):
        # Snapshots are read-only
        return False


admin.site.register(RideLocation, RideLocationAdmin)

backend/apps/rides/management/__init__.py (new file, empty)
backend/apps/rides/management/commands/__init__.py (new file, empty)

@@ -0,0 +1,36 @@
from django.core.management.base import BaseCommand
from django.utils import timezone

from apps.rides.services import RideRankingService


class Command(BaseCommand):
    help = "Calculates and updates ride rankings using the Internet Roller Coaster Poll algorithm"

    def add_arguments(self, parser):
        parser.add_argument(
            "--category",
            type=str,
            default=None,
            help="Optional ride category to filter (e.g., RC for roller coasters)",
        )

    def handle(self, *args, **options):
        category = options.get("category")

        service = RideRankingService()
        self.stdout.write(
            self.style.SUCCESS(
                f"Starting ride ranking calculation at {timezone.now().isoformat()}"
            )
        )

        result = service.update_all_rankings(category=category)

        self.stdout.write(
            self.style.SUCCESS(
                f"Completed ranking calculation: {result.get('rides_ranked', 0)} rides ranked, "
                f"{result.get('comparisons_made', 0)} comparisons, "
                f"duration={result.get('duration', 0):.2f}s"
            )
        )
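An invocation sketch for the command above. The command's module name is not visible in this view, so "calculate_ride_rankings" is an assumed name; substitute the real filename under management/commands/:

from django.core.management import call_command

call_command("calculate_ride_rankings")                 # assumed name; all categories
call_command("calculate_ride_rankings", category="RC")  # roller coasters only
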
backend/apps/rides/migrations/0006_add_ride_rankings.py (new file, 603 lines)
@@ -0,0 +1,603 @@
# Generated by Django 5.2.5 on 2025-08-25 00:50

import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import pgtrigger.compiler
import pgtrigger.migrations
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ("pghistory", "0007_auto_20250421_0444"),
        ("rides", "0005_ridelocationevent_ridelocation_insert_insert_and_more"),
    ]

    operations = [
        migrations.CreateModel(
            name="RidePairComparison",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "ride_a_wins",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of mutual riders who rated ride_a higher",
                    ),
                ),
                (
                    "ride_b_wins",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of mutual riders who rated ride_b higher",
                    ),
                ),
                (
                    "ties",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of mutual riders who rated both rides equally",
                    ),
                ),
                (
                    "mutual_riders_count",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Total number of users who have rated both rides",
                    ),
                ),
                (
                    "ride_a_avg_rating",
                    models.DecimalField(
                        blank=True,
                        decimal_places=2,
                        help_text="Average rating of ride_a from mutual riders",
                        max_digits=3,
                        null=True,
                    ),
                ),
                (
                    "ride_b_avg_rating",
                    models.DecimalField(
                        blank=True,
                        decimal_places=2,
                        help_text="Average rating of ride_b from mutual riders",
                        max_digits=3,
                        null=True,
                    ),
                ),
                (
                    "last_calculated",
                    models.DateTimeField(
                        auto_now=True,
                        help_text="When this comparison was last calculated",
                    ),
                ),
                (
                    "ride_a",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="comparisons_as_a",
                        to="rides.ride",
                    ),
                ),
                (
                    "ride_b",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="comparisons_as_b",
                        to="rides.ride",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="RidePairComparisonEvent",
            fields=[
                ("pgh_id", models.AutoField(primary_key=True, serialize=False)),
                ("pgh_created_at", models.DateTimeField(auto_now_add=True)),
                ("pgh_label", models.TextField(help_text="The event label.")),
                ("id", models.BigIntegerField()),
                (
                    "ride_a_wins",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of mutual riders who rated ride_a higher",
                    ),
                ),
                (
                    "ride_b_wins",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of mutual riders who rated ride_b higher",
                    ),
                ),
                (
                    "ties",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of mutual riders who rated both rides equally",
                    ),
                ),
                (
                    "mutual_riders_count",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Total number of users who have rated both rides",
                    ),
                ),
                (
                    "ride_a_avg_rating",
                    models.DecimalField(
                        blank=True,
                        decimal_places=2,
                        help_text="Average rating of ride_a from mutual riders",
                        max_digits=3,
                        null=True,
                    ),
                ),
                (
                    "ride_b_avg_rating",
                    models.DecimalField(
                        blank=True,
                        decimal_places=2,
                        help_text="Average rating of ride_b from mutual riders",
                        max_digits=3,
                        null=True,
                    ),
                ),
                (
                    "last_calculated",
                    models.DateTimeField(
                        auto_now=True,
                        help_text="When this comparison was last calculated",
                    ),
                ),
                (
                    "pgh_context",
                    models.ForeignKey(
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="pghistory.context",
                    ),
                ),
                (
                    "pgh_obj",
                    models.ForeignKey(
                        db_constraint=False,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="events",
                        to="rides.ridepaircomparison",
                    ),
                ),
                (
                    "ride_a",
                    models.ForeignKey(
                        db_constraint=False,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        related_query_name="+",
                        to="rides.ride",
                    ),
                ),
                (
                    "ride_b",
                    models.ForeignKey(
                        db_constraint=False,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        related_query_name="+",
                        to="rides.ride",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
        migrations.CreateModel(
            name="RideRanking",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "rank",
                    models.PositiveIntegerField(
                        db_index=True, help_text="Overall rank position (1 = best)"
                    ),
                ),
                (
                    "wins",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of rides this ride beats in pairwise comparisons",
                    ),
                ),
                (
                    "losses",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of rides that beat this ride in pairwise comparisons",
                    ),
                ),
                (
                    "ties",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of rides with equal preference in pairwise comparisons",
                    ),
                ),
                (
                    "winning_percentage",
                    models.DecimalField(
                        db_index=True,
                        decimal_places=4,
                        help_text="Win percentage where ties count as 0.5",
                        max_digits=5,
                        validators=[
                            django.core.validators.MinValueValidator(0),
                            django.core.validators.MaxValueValidator(1),
                        ],
                    ),
                ),
                (
                    "mutual_riders_count",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Total number of users who have rated this ride",
                    ),
                ),
                (
                    "comparison_count",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of other rides this was compared against",
                    ),
                ),
                (
                    "average_rating",
                    models.DecimalField(
                        blank=True,
                        decimal_places=2,
                        help_text="Average rating from all users who have rated this ride",
                        max_digits=3,
                        null=True,
                        validators=[
                            django.core.validators.MinValueValidator(1),
                            django.core.validators.MaxValueValidator(10),
                        ],
                    ),
                ),
                (
                    "last_calculated",
                    models.DateTimeField(
                        default=django.utils.timezone.now,
                        help_text="When this ranking was last calculated",
                    ),
                ),
                (
                    "calculation_version",
                    models.CharField(
                        default="1.0",
                        help_text="Algorithm version used for calculation",
                        max_length=10,
                    ),
                ),
                (
                    "ride",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="ranking",
                        to="rides.ride",
                    ),
                ),
            ],
            options={
                "ordering": ["rank"],
            },
        ),
        migrations.CreateModel(
            name="RideRankingEvent",
            fields=[
                ("pgh_id", models.AutoField(primary_key=True, serialize=False)),
                ("pgh_created_at", models.DateTimeField(auto_now_add=True)),
                ("pgh_label", models.TextField(help_text="The event label.")),
                ("id", models.BigIntegerField()),
                (
                    "rank",
                    models.PositiveIntegerField(
                        help_text="Overall rank position (1 = best)"
                    ),
                ),
                (
                    "wins",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of rides this ride beats in pairwise comparisons",
                    ),
                ),
                (
                    "losses",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of rides that beat this ride in pairwise comparisons",
                    ),
                ),
                (
                    "ties",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of rides with equal preference in pairwise comparisons",
                    ),
                ),
                (
                    "winning_percentage",
                    models.DecimalField(
                        decimal_places=4,
                        help_text="Win percentage where ties count as 0.5",
                        max_digits=5,
                        validators=[
                            django.core.validators.MinValueValidator(0),
                            django.core.validators.MaxValueValidator(1),
                        ],
                    ),
                ),
                (
                    "mutual_riders_count",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Total number of users who have rated this ride",
                    ),
                ),
                (
                    "comparison_count",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="Number of other rides this was compared against",
                    ),
                ),
                (
                    "average_rating",
                    models.DecimalField(
                        blank=True,
                        decimal_places=2,
                        help_text="Average rating from all users who have rated this ride",
                        max_digits=3,
                        null=True,
                        validators=[
                            django.core.validators.MinValueValidator(1),
                            django.core.validators.MaxValueValidator(10),
                        ],
                    ),
                ),
                (
                    "last_calculated",
                    models.DateTimeField(
                        default=django.utils.timezone.now,
                        help_text="When this ranking was last calculated",
                    ),
                ),
                (
                    "calculation_version",
                    models.CharField(
                        default="1.0",
                        help_text="Algorithm version used for calculation",
                        max_length=10,
                    ),
                ),
                (
                    "pgh_context",
                    models.ForeignKey(
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="pghistory.context",
                    ),
                ),
                (
                    "pgh_obj",
                    models.ForeignKey(
                        db_constraint=False,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="events",
                        to="rides.rideranking",
                    ),
                ),
                (
                    "ride",
                    models.ForeignKey(
                        db_constraint=False,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        related_query_name="+",
                        to="rides.ride",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
        migrations.CreateModel(
            name="RankingSnapshot",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("rank", models.PositiveIntegerField()),
                (
                    "winning_percentage",
                    models.DecimalField(decimal_places=4, max_digits=5),
                ),
                (
                    "snapshot_date",
                    models.DateField(
                        db_index=True,
                        help_text="Date when this ranking snapshot was taken",
                    ),
                ),
                (
                    "ride",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="ranking_history",
                        to="rides.ride",
                    ),
                ),
            ],
            options={
                "ordering": ["-snapshot_date", "rank"],
                "indexes": [
                    models.Index(
                        fields=["snapshot_date", "rank"],
                        name="rides_ranki_snapsho_8e2657_idx",
                    ),
                    models.Index(
                        fields=["ride", "-snapshot_date"],
                        name="rides_ranki_ride_id_827bb9_idx",
                    ),
                ],
                "unique_together": {("ride", "snapshot_date")},
            },
        ),
        migrations.AddIndex(
            model_name="ridepaircomparison",
            index=models.Index(
                fields=["ride_a", "ride_b"], name="rides_ridep_ride_a__eb0674_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="ridepaircomparison",
            index=models.Index(
                fields=["last_calculated"], name="rides_ridep_last_ca_bd9f6c_idx"
            ),
        ),
        migrations.AlterUniqueTogether(
            name="ridepaircomparison",
            unique_together={("ride_a", "ride_b")},
        ),
        pgtrigger.migrations.AddTrigger(
            model_name="ridepaircomparison",
            trigger=pgtrigger.compiler.Trigger(
                name="insert_insert",
                sql=pgtrigger.compiler.UpsertTriggerSql(
                    func='INSERT INTO "rides_ridepaircomparisonevent" ("id", "last_calculated", "mutual_riders_count", "pgh_context_id", "pgh_created_at", "pgh_label", "pgh_obj_id", "ride_a_avg_rating", "ride_a_id", "ride_a_wins", "ride_b_avg_rating", "ride_b_id", "ride_b_wins", "ties") VALUES (NEW."id", NEW."last_calculated", NEW."mutual_riders_count", _pgh_attach_context(), NOW(), \'insert\', NEW."id", NEW."ride_a_avg_rating", NEW."ride_a_id", NEW."ride_a_wins", NEW."ride_b_avg_rating", NEW."ride_b_id", NEW."ride_b_wins", NEW."ties"); RETURN NULL;',
                    hash="6a640e10fcfd58c48029ee5b84ea7f0826f50022",
                    operation="INSERT",
                    pgid="pgtrigger_insert_insert_9ad59",
                    table="rides_ridepaircomparison",
                    when="AFTER",
                ),
            ),
        ),
        pgtrigger.migrations.AddTrigger(
            model_name="ridepaircomparison",
            trigger=pgtrigger.compiler.Trigger(
                name="update_update",
                sql=pgtrigger.compiler.UpsertTriggerSql(
                    condition="WHEN (OLD.* IS DISTINCT FROM NEW.*)",
                    func='INSERT INTO "rides_ridepaircomparisonevent" ("id", "last_calculated", "mutual_riders_count", "pgh_context_id", "pgh_created_at", "pgh_label", "pgh_obj_id", "ride_a_avg_rating", "ride_a_id", "ride_a_wins", "ride_b_avg_rating", "ride_b_id", "ride_b_wins", "ties") VALUES (NEW."id", NEW."last_calculated", NEW."mutual_riders_count", _pgh_attach_context(), NOW(), \'update\', NEW."id", NEW."ride_a_avg_rating", NEW."ride_a_id", NEW."ride_a_wins", NEW."ride_b_avg_rating", NEW."ride_b_id", NEW."ride_b_wins", NEW."ties"); RETURN NULL;',
                    hash="a77eee0b791bada3f84f008dabd7486c66b03fa6",
                    operation="UPDATE",
                    pgid="pgtrigger_update_update_73b31",
                    table="rides_ridepaircomparison",
                    when="AFTER",
                ),
            ),
        ),
        migrations.AddIndex(
            model_name="rideranking",
            index=models.Index(fields=["rank"], name="rides_rider_rank_ea4706_idx"),
        ),
        migrations.AddIndex(
            model_name="rideranking",
            index=models.Index(
                fields=["winning_percentage", "-mutual_riders_count"],
                name="rides_rider_winning_d9b3e8_idx",
            ),
        ),
        migrations.AddIndex(
            model_name="rideranking",
            index=models.Index(
                fields=["ride", "last_calculated"],
                name="rides_rider_ride_id_ece73d_idx",
            ),
        ),
        migrations.AddConstraint(
            model_name="rideranking",
            constraint=models.CheckConstraint(
                condition=models.Q(
                    ("winning_percentage__gte", 0), ("winning_percentage__lte", 1)
                ),
                name="rideranking_winning_percentage_range",
                violation_error_message="Winning percentage must be between 0 and 1",
            ),
        ),
        migrations.AddConstraint(
            model_name="rideranking",
            constraint=models.CheckConstraint(
                condition=models.Q(
                    ("average_rating__isnull", True),
                    models.Q(("average_rating__gte", 1), ("average_rating__lte", 10)),
                    _connector="OR",
                ),
                name="rideranking_average_rating_range",
                violation_error_message="Average rating must be between 1 and 10",
            ),
        ),
        pgtrigger.migrations.AddTrigger(
            model_name="rideranking",
            trigger=pgtrigger.compiler.Trigger(
                name="insert_insert",
                sql=pgtrigger.compiler.UpsertTriggerSql(
                    func='INSERT INTO "rides_riderankingevent" ("average_rating", "calculation_version", "comparison_count", "id", "last_calculated", "losses", "mutual_riders_count", "pgh_context_id", "pgh_created_at", "pgh_label", "pgh_obj_id", "rank", "ride_id", "ties", "winning_percentage", "wins") VALUES (NEW."average_rating", NEW."calculation_version", NEW."comparison_count", NEW."id", NEW."last_calculated", NEW."losses", NEW."mutual_riders_count", _pgh_attach_context(), NOW(), \'insert\', NEW."id", NEW."rank", NEW."ride_id", NEW."ties", NEW."winning_percentage", NEW."wins"); RETURN NULL;',
                    hash="c5f9dced5824a55e6f36e476eb382ed770aa5716",
                    operation="INSERT",
                    pgid="pgtrigger_insert_insert_01af3",
                    table="rides_rideranking",
                    when="AFTER",
                ),
            ),
        ),
        pgtrigger.migrations.AddTrigger(
            model_name="rideranking",
            trigger=pgtrigger.compiler.Trigger(
                name="update_update",
                sql=pgtrigger.compiler.UpsertTriggerSql(
                    condition="WHEN (OLD.* IS DISTINCT FROM NEW.*)",
                    func='INSERT INTO "rides_riderankingevent" ("average_rating", "calculation_version", "comparison_count", "id", "last_calculated", "losses", "mutual_riders_count", "pgh_context_id", "pgh_created_at", "pgh_label", "pgh_obj_id", "rank", "ride_id", "ties", "winning_percentage", "wins") VALUES (NEW."average_rating", NEW."calculation_version", NEW."comparison_count", NEW."id", NEW."last_calculated", NEW."losses", NEW."mutual_riders_count", _pgh_attach_context(), NOW(), \'update\', NEW."id", NEW."rank", NEW."ride_id", NEW."ties", NEW."winning_percentage", NEW."wins"); RETURN NULL;',
                    hash="363e44ce3c87e8b66406d63d6f1b26ad604c79d2",
                    operation="UPDATE",
                    pgid="pgtrigger_update_update_c3f27",
                    table="rides_rideranking",
                    when="AFTER",
                ),
            ),
        ),
    ]
@@ -8,9 +8,10 @@ The Company model is aliased as Manufacturer to clarify its role as ride manufac
while maintaining backward compatibility through the Company alias.
"""

from .rides import Ride, RideModel, RollerCoasterStats, Categories
from .rides import Ride, RideModel, RollerCoasterStats, Categories, CATEGORY_CHOICES
from .location import RideLocation
from .reviews import RideReview
from .rankings import RideRanking, RidePairComparison, RankingSnapshot

__all__ = [
    # Primary models
@@ -19,6 +20,10 @@ __all__ = [
    "RollerCoasterStats",
    "RideLocation",
    "RideReview",
    # Rankings
    "RideRanking",
    "RidePairComparison",
    "RankingSnapshot",
    # Shared constants
    "Categories",
]

backend/apps/rides/models/rankings.py (new file, 212 lines)
@@ -0,0 +1,212 @@
"""
|
||||
Models for ride ranking system using Internet Roller Coaster Poll algorithm.
|
||||
|
||||
This system calculates rankings based on pairwise comparisons between rides,
|
||||
where each ride is compared to every other ride to determine which one
|
||||
more riders preferred.
|
||||
"""
|
||||
|
||||
from django.db import models
|
||||
from django.utils import timezone
|
||||
from django.core.validators import MinValueValidator, MaxValueValidator
|
||||
import pghistory
|
||||
|
||||
|
||||
@pghistory.track()
|
||||
class RideRanking(models.Model):
|
||||
"""
|
||||
Stores calculated rankings for rides using the Internet Roller Coaster Poll algorithm.
|
||||
|
||||
Rankings are recalculated daily based on user reviews/ratings.
|
||||
Each ride's rank is determined by its winning percentage in pairwise comparisons.
|
||||
"""
|
||||
|
||||
ride = models.OneToOneField(
|
||||
"rides.Ride", on_delete=models.CASCADE, related_name="ranking"
|
||||
)
|
||||
|
||||
# Core ranking metrics
|
||||
rank = models.PositiveIntegerField(
|
||||
db_index=True, help_text="Overall rank position (1 = best)"
|
||||
)
|
||||
wins = models.PositiveIntegerField(
|
||||
default=0, help_text="Number of rides this ride beats in pairwise comparisons"
|
||||
)
|
||||
losses = models.PositiveIntegerField(
|
||||
default=0,
|
||||
help_text="Number of rides that beat this ride in pairwise comparisons",
|
||||
)
|
||||
ties = models.PositiveIntegerField(
|
||||
default=0,
|
||||
help_text="Number of rides with equal preference in pairwise comparisons",
|
||||
)
|
||||
winning_percentage = models.DecimalField(
|
||||
max_digits=5,
|
||||
decimal_places=4,
|
||||
validators=[MinValueValidator(0), MaxValueValidator(1)],
|
||||
db_index=True,
|
||||
help_text="Win percentage where ties count as 0.5",
|
||||
)
|
||||
|
||||
# Additional metrics
|
||||
mutual_riders_count = models.PositiveIntegerField(
|
||||
default=0, help_text="Total number of users who have rated this ride"
|
||||
)
|
||||
comparison_count = models.PositiveIntegerField(
|
||||
default=0, help_text="Number of other rides this was compared against"
|
||||
)
|
||||
average_rating = models.DecimalField(
|
||||
max_digits=3,
|
||||
decimal_places=2,
|
||||
null=True,
|
||||
blank=True,
|
||||
validators=[MinValueValidator(1), MaxValueValidator(10)],
|
||||
help_text="Average rating from all users who have rated this ride",
|
||||
)
|
||||
|
||||
# Metadata
|
||||
last_calculated = models.DateTimeField(
|
||||
default=timezone.now, help_text="When this ranking was last calculated"
|
||||
)
|
||||
calculation_version = models.CharField(
|
||||
max_length=10, default="1.0", help_text="Algorithm version used for calculation"
|
||||
)
|
||||
|
||||
class Meta:
|
||||
ordering = ["rank"]
|
||||
indexes = [
|
||||
models.Index(fields=["rank"]),
|
||||
models.Index(fields=["winning_percentage", "-mutual_riders_count"]),
|
||||
models.Index(fields=["ride", "last_calculated"]),
|
||||
]
|
||||
constraints = [
|
||||
models.CheckConstraint(
|
||||
name="rideranking_winning_percentage_range",
|
||||
check=models.Q(winning_percentage__gte=0)
|
||||
& models.Q(winning_percentage__lte=1),
|
||||
violation_error_message="Winning percentage must be between 0 and 1",
|
||||
),
|
||||
models.CheckConstraint(
|
||||
name="rideranking_average_rating_range",
|
||||
check=models.Q(average_rating__isnull=True)
|
||||
| (models.Q(average_rating__gte=1) & models.Q(average_rating__lte=10)),
|
||||
violation_error_message="Average rating must be between 1 and 10",
|
||||
),
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
return f"#{self.rank} - {self.ride.name} ({self.winning_percentage:.1%})"
|
||||
|
||||
@property
|
||||
def total_comparisons(self):
|
||||
"""Total number of pairwise comparisons (wins + losses + ties)."""
|
||||
return self.wins + self.losses + self.ties
|
||||
|
||||
|
||||
@pghistory.track()
|
||||
class RidePairComparison(models.Model):
|
||||
"""
|
||||
Caches pairwise comparison results between two rides.
|
||||
|
||||
This model stores the results of comparing two rides based on mutual riders
|
||||
(users who have rated both rides). It's used to speed up ranking calculations.
|
||||
"""
|
||||
|
||||
ride_a = models.ForeignKey(
|
||||
"rides.Ride", on_delete=models.CASCADE, related_name="comparisons_as_a"
|
||||
)
|
||||
ride_b = models.ForeignKey(
|
||||
"rides.Ride", on_delete=models.CASCADE, related_name="comparisons_as_b"
|
||||
)
|
||||
|
||||
# Comparison results
|
||||
ride_a_wins = models.PositiveIntegerField(
|
||||
default=0, help_text="Number of mutual riders who rated ride_a higher"
|
||||
)
|
||||
ride_b_wins = models.PositiveIntegerField(
|
||||
default=0, help_text="Number of mutual riders who rated ride_b higher"
|
||||
)
|
||||
ties = models.PositiveIntegerField(
|
||||
default=0, help_text="Number of mutual riders who rated both rides equally"
|
||||
)
|
||||
|
||||
# Metrics
|
||||
mutual_riders_count = models.PositiveIntegerField(
|
||||
default=0, help_text="Total number of users who have rated both rides"
|
||||
)
|
||||
ride_a_avg_rating = models.DecimalField(
|
||||
max_digits=3,
|
||||
decimal_places=2,
|
||||
null=True,
|
||||
blank=True,
|
||||
help_text="Average rating of ride_a from mutual riders",
|
||||
)
|
||||
ride_b_avg_rating = models.DecimalField(
|
||||
max_digits=3,
|
||||
decimal_places=2,
|
||||
null=True,
|
||||
blank=True,
|
||||
help_text="Average rating of ride_b from mutual riders",
|
||||
)
|
||||
|
||||
# Metadata
|
||||
last_calculated = models.DateTimeField(
|
||||
auto_now=True, help_text="When this comparison was last calculated"
|
||||
)
|
||||
|
||||
class Meta:
|
||||
unique_together = [["ride_a", "ride_b"]]
|
||||
indexes = [
|
||||
models.Index(fields=["ride_a", "ride_b"]),
|
||||
models.Index(fields=["last_calculated"]),
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
winner = "TIE"
|
||||
if self.ride_a_wins > self.ride_b_wins:
|
||||
winner = self.ride_a.name
|
||||
elif self.ride_b_wins > self.ride_a_wins:
|
||||
winner = self.ride_b.name
|
||||
return f"{self.ride_a.name} vs {self.ride_b.name} - Winner: {winner}"
|
||||
|
||||
@property
|
||||
def winner(self):
|
||||
"""Returns the winning ride or None for a tie."""
|
||||
if self.ride_a_wins > self.ride_b_wins:
|
||||
return self.ride_a
|
||||
elif self.ride_b_wins > self.ride_a_wins:
|
||||
return self.ride_b
|
||||
return None
|
||||
|
||||
@property
|
||||
def is_tie(self):
|
||||
"""Returns True if the comparison resulted in a tie."""
|
||||
return self.ride_a_wins == self.ride_b_wins
|
||||
|
||||
|
||||
class RankingSnapshot(models.Model):
|
||||
"""
|
||||
Stores historical snapshots of rankings for tracking changes over time.
|
||||
|
||||
This allows us to show ranking trends and movements.
|
||||
"""
|
||||
|
||||
ride = models.ForeignKey(
|
||||
"rides.Ride", on_delete=models.CASCADE, related_name="ranking_history"
|
||||
)
|
||||
rank = models.PositiveIntegerField()
|
||||
winning_percentage = models.DecimalField(max_digits=5, decimal_places=4)
|
||||
snapshot_date = models.DateField(
|
||||
db_index=True, help_text="Date when this ranking snapshot was taken"
|
||||
)
|
||||
|
||||
class Meta:
|
||||
unique_together = [["ride", "snapshot_date"]]
|
||||
ordering = ["-snapshot_date", "rank"]
|
||||
indexes = [
|
||||
models.Index(fields=["snapshot_date", "rank"]),
|
||||
models.Index(fields=["ride", "-snapshot_date"]),
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.ride.name} - Rank #{self.rank} on {self.snapshot_date}"
|
||||
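For orientation, a minimal sketch of reading these models from a Django shell. It assumes the migrations for the rankings models have been applied and a ranking run has completed; the import path and field names come from this commit, the output is illustrative.

# Hypothetical shell session; values printed depend on your data.
from apps.rides.models import RideRanking

top_ten = RideRanking.objects.select_related("ride").order_by("rank")[:10]
for entry in top_ten:
    # total_comparisons is the wins + losses + ties property defined above
    print(entry.rank, entry.ride.name, f"{entry.winning_percentage:.1%}", entry.total_comparisons)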
@@ -1,6 +1,7 @@
 from django.db import models
 from django.utils.text import slugify
 from django.contrib.contenttypes.fields import GenericRelation
+from django.db.models import Avg
 from apps.core.models import TrackedModel
 from .company import Company
 import pghistory
@@ -56,7 +57,11 @@ class RideModel(TrackedModel):

 @pghistory.track()
 class Ride(TrackedModel):
-    """Model for individual ride installations at parks"""
+    """Model for individual ride installations at parks
+
+    Note: The average_rating field is denormalized and refreshed by background
+    jobs. Use selectors or annotations for real-time calculations if needed.
+    """

     STATUS_CHOICES = [
         ("", "Select status"),
@@ -8,7 +8,7 @@ from django.db.models import QuerySet, Q, Count, Avg, Prefetch
 from django.contrib.gis.geos import Point
 from django.contrib.gis.measure import Distance

-from .models import Ride, RideModel, RideReview
+from .models import Ride, RideModel, RideReview, CATEGORY_CHOICES


 def ride_list_for_display(
@@ -32,15 +32,15 @@ def ride_list_for_display(
             "ride_model",
             "park_area",
         )
-        .prefetch_related("park__location", "location")
+        .prefetch_related("park__location")
         .annotate(average_rating_calculated=Avg("reviews__rating"))
     )

     if filters:
-        if "status" in filters:
-            queryset = queryset.filter(status=filters["status"])
-        if "category" in filters:
-            queryset = queryset.filter(category=filters["category"])
+        if "status" in filters and filters["status"]:
+            queryset = queryset.filter(status__in=filters["status"])
+        if "category" in filters and filters["category"]:
+            queryset = queryset.filter(category__in=filters["category"])
         if "manufacturer" in filters:
             queryset = queryset.filter(manufacturer=filters["manufacturer"])
         if "park" in filters:
@@ -81,7 +81,6 @@ def ride_detail_optimized(*, slug: str, park_slug: str) -> Ride:
         )
         .prefetch_related(
             "park__location",
-            "location",
             Prefetch(
                 "reviews",
                 queryset=RideReview.objects.select_related("user").filter(
@@ -164,7 +163,7 @@ def rides_in_park(*, park_slug: str) -> QuerySet[Ride]:
     return (
         Ride.objects.filter(park__slug=park_slug)
         .select_related("manufacturer", "designer", "ride_model", "park_area")
-        .prefetch_related("location")
+        .prefetch_related()
         .annotate(average_rating_calculated=Avg("reviews__rating"))
         .order_by("park_area__name", "name")
     )
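A hypothetical call reflecting the list-valued filters introduced above. The selector module path and the exact (truncated) signature of ride_list_for_display are assumptions; the filter keys and the "OPERATING"/"RC" values come from this diff and the ranking service below.

# Sketch only: assumes ride_list_for_display accepts a "filters" mapping.
from apps.rides.selectors import ride_list_for_display

rides = ride_list_for_display(
    filters={
        "status": ["OPERATING"],   # matched with status__in after this change
        "category": ["RC"],        # matched with category__in after this change
    }
)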
backend/apps/rides/services/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
"""
Services for the rides app.
"""

from .ranking_service import RideRankingService

__all__ = ["RideRankingService"]
backend/apps/rides/services/ranking_service.py (new file, 550 lines)
@@ -0,0 +1,550 @@
"""
Service for calculating ride rankings using the Internet Roller Coaster Poll algorithm.

This service implements a pairwise comparison system where each ride is compared
to every other ride based on mutual riders (users who have rated both rides).
Rankings are determined by winning percentage in these comparisons.
"""

import logging
from typing import Any, Dict, List, Tuple, Optional
from decimal import Decimal
from datetime import date

from django.db import transaction
from django.db.models import Avg, Count, Q
from django.utils import timezone

from apps.rides.models import (
    Ride,
    RideReview,
    RideRanking,
    RidePairComparison,
    RankingSnapshot,
)


logger = logging.getLogger(__name__)


class RideRankingService:
    """
    Calculates ride rankings using the Internet Roller Coaster Poll algorithm.

    Algorithm Overview:
    1. For each pair of rides, find users who have rated both
    2. Count how many users preferred each ride (higher rating)
    3. Calculate wins, losses, and ties for each ride
    4. Rank rides by winning percentage (ties count as 0.5 wins)
    5. Break ties by head-to-head comparison
    """

    def __init__(self):
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
        self.calculation_version = "1.0"

    def update_all_rankings(self, category: Optional[str] = None) -> Dict[str, Any]:
        """
        Main entry point to update all ride rankings.

        Args:
            category: Optional ride category to filter ('RC' for roller coasters, etc.)
                If None, ranks all rides.

        Returns:
            Dictionary with statistics about the ranking calculation
        """
        start_time = timezone.now()
        self.logger.info(
            f"Starting ranking calculation for category: {category or 'ALL'}"
        )

        try:
            with transaction.atomic():
                # Get rides to rank
                rides = self._get_eligible_rides(category)
                if not rides:
                    self.logger.warning("No eligible rides found for ranking")
                    return {
                        "status": "skipped",
                        "message": "No eligible rides found",
                        "duration": (timezone.now() - start_time).total_seconds(),
                    }

                self.logger.info(f"Found {len(rides)} rides to rank")

                # Calculate pairwise comparisons
                comparisons = self._calculate_all_comparisons(rides)

                # Calculate rankings from comparisons
                rankings = self._calculate_rankings_from_comparisons(rides, comparisons)

                # Save rankings
                self._save_rankings(rankings)

                # Save snapshots for historical tracking
                self._save_ranking_snapshots(rankings)

                # Clean up old data
                self._cleanup_old_data()

                duration = (timezone.now() - start_time).total_seconds()
                self.logger.info(
                    f"Ranking calculation completed in {duration:.2f} seconds"
                )

                return {
                    "status": "success",
                    "rides_ranked": len(rides),
                    "comparisons_made": len(comparisons),
                    "duration": duration,
                    "timestamp": timezone.now(),
                }

        except Exception as e:
            self.logger.error(f"Error updating rankings: {e}", exc_info=True)
            raise

    def _get_eligible_rides(self, category: Optional[str] = None) -> List[Ride]:
        """
        Get rides that are eligible for ranking.

        Only includes rides that:
        - Are currently operating
        - Have at least one published review/rating
        """
        queryset = (
            Ride.objects.filter(status="OPERATING", reviews__is_published=True)
            .annotate(
                review_count=Count("reviews", filter=Q(reviews__is_published=True))
            )
            .filter(review_count__gt=0)
        )

        if category:
            queryset = queryset.filter(category=category)

        return list(queryset.distinct())

    def _calculate_all_comparisons(
        self, rides: List[Ride]
    ) -> Dict[Tuple[int, int], RidePairComparison]:
        """
        Calculate pairwise comparisons for all ride pairs.

        Returns a dictionary keyed by (ride_a_id, ride_b_id) tuples.
        """
        comparisons = {}
        total_pairs = len(rides) * (len(rides) - 1) // 2
        processed = 0

        for i, ride_a in enumerate(rides):
            for ride_b in rides[i + 1 :]:
                comparison = self._calculate_pairwise_comparison(ride_a, ride_b)
                if comparison:
                    # Store both directions for easy lookup
                    comparisons[(ride_a.id, ride_b.id)] = comparison
                    comparisons[(ride_b.id, ride_a.id)] = comparison

                processed += 1
                if processed % 100 == 0:
                    self.logger.debug(
                        f"Processed {processed}/{total_pairs} comparisons"
                    )

        return comparisons

    def _calculate_pairwise_comparison(
        self, ride_a: Ride, ride_b: Ride
    ) -> Optional[RidePairComparison]:
        """
        Calculate the pairwise comparison between two rides.

        Finds users who have rated both rides and determines which ride
        they preferred based on their ratings.
        """
        # Get mutual riders (users who have rated both rides)
        ride_a_reviewers = set(
            RideReview.objects.filter(ride=ride_a, is_published=True).values_list(
                "user_id", flat=True
            )
        )

        ride_b_reviewers = set(
            RideReview.objects.filter(ride=ride_b, is_published=True).values_list(
                "user_id", flat=True
            )
        )

        mutual_riders = ride_a_reviewers & ride_b_reviewers

        if not mutual_riders:
            # No mutual riders, no comparison possible
            return None

        # Get ratings from mutual riders
        ride_a_ratings = {
            review.user_id: review.rating
            for review in RideReview.objects.filter(
                ride=ride_a, user_id__in=mutual_riders, is_published=True
            )
        }

        ride_b_ratings = {
            review.user_id: review.rating
            for review in RideReview.objects.filter(
                ride=ride_b, user_id__in=mutual_riders, is_published=True
            )
        }

        # Count wins and ties
        ride_a_wins = 0
        ride_b_wins = 0
        ties = 0

        for user_id in mutual_riders:
            rating_a = ride_a_ratings.get(user_id, 0)
            rating_b = ride_b_ratings.get(user_id, 0)

            if rating_a > rating_b:
                ride_a_wins += 1
            elif rating_b > rating_a:
                ride_b_wins += 1
            else:
                ties += 1

        # Calculate average ratings from mutual riders
        ride_a_avg = (
            sum(ride_a_ratings.values()) / len(ride_a_ratings) if ride_a_ratings else 0
        )
        ride_b_avg = (
            sum(ride_b_ratings.values()) / len(ride_b_ratings) if ride_b_ratings else 0
        )

        # Create or update comparison record, canonicalizing so the lower id
        # is always stored as ride_a (matches the unique_together constraint)
        comparison, created = RidePairComparison.objects.update_or_create(
            ride_a=ride_a if ride_a.id < ride_b.id else ride_b,
            ride_b=ride_b if ride_a.id < ride_b.id else ride_a,
            defaults={
                "ride_a_wins": ride_a_wins if ride_a.id < ride_b.id else ride_b_wins,
                "ride_b_wins": ride_b_wins if ride_a.id < ride_b.id else ride_a_wins,
                "ties": ties,
                "mutual_riders_count": len(mutual_riders),
                "ride_a_avg_rating": (
                    Decimal(str(ride_a_avg))
                    if ride_a.id < ride_b.id
                    else Decimal(str(ride_b_avg))
                ),
                "ride_b_avg_rating": (
                    Decimal(str(ride_b_avg))
                    if ride_a.id < ride_b.id
                    else Decimal(str(ride_a_avg))
                ),
            },
        )

        return comparison

    def _calculate_rankings_from_comparisons(
        self, rides: List[Ride], comparisons: Dict[Tuple[int, int], RidePairComparison]
    ) -> List[Dict]:
        """
        Calculate final rankings from pairwise comparisons.

        Returns a list of dictionaries containing ranking data for each ride.
        """
        rankings = []

        for ride in rides:
            wins = 0
            losses = 0
            ties = 0
            comparison_count = 0

            # Count wins, losses, and ties
            for other_ride in rides:
                if ride.id == other_ride.id:
                    continue

                comparison_key = (
                    min(ride.id, other_ride.id),
                    max(ride.id, other_ride.id),
                )
                comparison = comparisons.get(comparison_key)

                if not comparison:
                    continue

                comparison_count += 1

                # Determine win/loss/tie for this ride
                if comparison.ride_a_id == ride.id:
                    if comparison.ride_a_wins > comparison.ride_b_wins:
                        wins += 1
                    elif comparison.ride_a_wins < comparison.ride_b_wins:
                        losses += 1
                    else:
                        ties += 1
                else:  # ride_b_id == ride.id
                    if comparison.ride_b_wins > comparison.ride_a_wins:
                        wins += 1
                    elif comparison.ride_b_wins < comparison.ride_a_wins:
                        losses += 1
                    else:
                        ties += 1

            # Calculate winning percentage (ties count as 0.5 wins),
            # e.g. 10 wins, 4 losses, 2 ties -> (10 + 1) / 16 = 0.6875
            total_comparisons = wins + losses + ties
            if total_comparisons > 0:
                winning_percentage = Decimal(
                    str((wins + 0.5 * ties) / total_comparisons)
                )
            else:
                winning_percentage = Decimal("0.5")

            # Get average rating and reviewer count
            ride_stats = RideReview.objects.filter(
                ride=ride, is_published=True
            ).aggregate(
                avg_rating=Avg("rating"), reviewer_count=Count("user", distinct=True)
            )

            rankings.append(
                {
                    "ride": ride,
                    "wins": wins,
                    "losses": losses,
                    "ties": ties,
                    "winning_percentage": winning_percentage,
                    "comparison_count": comparison_count,
                    "average_rating": ride_stats["avg_rating"],
                    "mutual_riders_count": ride_stats["reviewer_count"] or 0,
                }
            )

        # Sort by winning percentage (descending), then by mutual riders count for ties
        rankings.sort(
            key=lambda x: (
                x["winning_percentage"],
                x["mutual_riders_count"],
                x["average_rating"] or 0,
            ),
            reverse=True,
        )

        # Handle tie-breaking with head-to-head comparisons
        rankings = self._apply_tiebreakers(rankings, comparisons)

        # Assign final ranks
        for i, ranking_data in enumerate(rankings, 1):
            ranking_data["rank"] = i

        return rankings

    def _apply_tiebreakers(
        self,
        rankings: List[Dict],
        comparisons: Dict[Tuple[int, int], RidePairComparison],
    ) -> List[Dict]:
        """
        Apply head-to-head tiebreaker for rides with identical winning percentages.

        If two rides have the same winning percentage, the one that beat the other
        in their head-to-head comparison gets the higher rank.
        """
        i = 0
        while i < len(rankings) - 1:
            # Find rides with same winning percentage
            tied_group = [rankings[i]]
            j = i + 1

            while (
                j < len(rankings)
                and rankings[j]["winning_percentage"]
                == rankings[i]["winning_percentage"]
            ):
                tied_group.append(rankings[j])
                j += 1

            if len(tied_group) > 1:
                # Apply head-to-head tiebreaker within the group
                tied_group = self._sort_tied_group(tied_group, comparisons)

                # Replace the tied section with sorted group
                rankings[i:j] = tied_group

            i = j

        return rankings

    def _sort_tied_group(
        self,
        tied_group: List[Dict],
        comparisons: Dict[Tuple[int, int], RidePairComparison],
    ) -> List[Dict]:
        """
        Sort a group of tied rides using head-to-head comparisons.
        """
        # Create mini-rankings within the tied group
        for ride_data in tied_group:
            mini_wins = 0
            mini_losses = 0

            for other_data in tied_group:
                if ride_data["ride"].id == other_data["ride"].id:
                    continue

                comparison_key = (
                    min(ride_data["ride"].id, other_data["ride"].id),
                    max(ride_data["ride"].id, other_data["ride"].id),
                )
                comparison = comparisons.get(comparison_key)

                if comparison:
                    if comparison.ride_a_id == ride_data["ride"].id:
                        if comparison.ride_a_wins > comparison.ride_b_wins:
                            mini_wins += 1
                        elif comparison.ride_a_wins < comparison.ride_b_wins:
                            mini_losses += 1
                    else:
                        if comparison.ride_b_wins > comparison.ride_a_wins:
                            mini_wins += 1
                        elif comparison.ride_b_wins < comparison.ride_a_wins:
                            mini_losses += 1

            ride_data["tiebreaker_score"] = mini_wins - mini_losses

        # Sort by tiebreaker score, then by mutual riders count, then by average rating
        tied_group.sort(
            key=lambda x: (
                x["tiebreaker_score"],
                x["mutual_riders_count"],
                x["average_rating"] or 0,
            ),
            reverse=True,
        )

        return tied_group

    def _save_rankings(self, rankings: List[Dict]):
        """Save calculated rankings to the database."""
        for ranking_data in rankings:
            RideRanking.objects.update_or_create(
                ride=ranking_data["ride"],
                defaults={
                    "rank": ranking_data["rank"],
                    "wins": ranking_data["wins"],
                    "losses": ranking_data["losses"],
                    "ties": ranking_data["ties"],
                    "winning_percentage": ranking_data["winning_percentage"],
                    "mutual_riders_count": ranking_data["mutual_riders_count"],
                    "comparison_count": ranking_data["comparison_count"],
                    "average_rating": ranking_data["average_rating"],
                    "last_calculated": timezone.now(),
                    "calculation_version": self.calculation_version,
                },
            )

    def _save_ranking_snapshots(self, rankings: List[Dict]):
        """Save ranking snapshots for historical tracking."""
        today = date.today()

        for ranking_data in rankings:
            RankingSnapshot.objects.update_or_create(
                ride=ranking_data["ride"],
                snapshot_date=today,
                defaults={
                    "rank": ranking_data["rank"],
                    "winning_percentage": ranking_data["winning_percentage"],
                },
            )

    def _cleanup_old_data(self, days_to_keep: int = 365):
        """Clean up old comparison and snapshot data."""
        cutoff_date = timezone.now() - timezone.timedelta(days=days_to_keep)

        # Delete old snapshots
        deleted_snapshots = RankingSnapshot.objects.filter(
            snapshot_date__lt=cutoff_date.date()
        ).delete()

        if deleted_snapshots[0] > 0:
            self.logger.info(f"Deleted {deleted_snapshots[0]} old ranking snapshots")

    def get_ride_ranking_details(self, ride: Ride) -> Optional[Dict]:
        """
        Get detailed ranking information for a specific ride.

        Returns dictionary with ranking details or None if not ranked.
        """
        try:
            ranking = RideRanking.objects.get(ride=ride)

            # Get recent head-to-head comparisons
            comparisons = (
                RidePairComparison.objects.filter(Q(ride_a=ride) | Q(ride_b=ride))
                .select_related("ride_a", "ride_b")
                .order_by("-mutual_riders_count")[:10]
            )

            # Get ranking history
            history = RankingSnapshot.objects.filter(ride=ride).order_by(
                "-snapshot_date"
            )[:30]

            return {
                "current_rank": ranking.rank,
                "winning_percentage": ranking.winning_percentage,
                "wins": ranking.wins,
                "losses": ranking.losses,
                "ties": ranking.ties,
                "average_rating": ranking.average_rating,
                "mutual_riders_count": ranking.mutual_riders_count,
                "last_calculated": ranking.last_calculated,
                "head_to_head": [
                    {
                        "opponent": (
                            comp.ride_b if comp.ride_a_id == ride.id else comp.ride_a
                        ),
                        "result": (
                            "win"
                            if (
                                (
                                    comp.ride_a_id == ride.id
                                    and comp.ride_a_wins > comp.ride_b_wins
                                )
                                or (
                                    comp.ride_b_id == ride.id
                                    and comp.ride_b_wins > comp.ride_a_wins
                                )
                            )
                            else (
                                "loss"
                                if (
                                    (
                                        comp.ride_a_id == ride.id
                                        and comp.ride_a_wins < comp.ride_b_wins
                                    )
                                    or (
                                        comp.ride_b_id == ride.id
                                        and comp.ride_b_wins < comp.ride_a_wins
                                    )
                                )
                                else "tie"
                            )
                        ),
                        "mutual_riders": comp.mutual_riders_count,
                    }
                    for comp in comparisons
                ],
                "ranking_history": [
                    {
                        "date": snapshot.snapshot_date,
                        "rank": snapshot.rank,
                        "winning_percentage": snapshot.winning_percentage,
                    }
                    for snapshot in history
                ],
            }
        except RideRanking.DoesNotExist:
            return None
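A self-contained illustration of the pairwise rule the service implements. The usernames and ratings are invented purely for illustration; the arithmetic mirrors _calculate_pairwise_comparison and the winning-percentage formula above (ties count as half a win).

# Two hypothetical rides, rated on a 1-10 scale by overlapping users.
ride_a_ratings = {"alice": 9, "bob": 7, "cara": 8}
ride_b_ratings = {"alice": 8, "bob": 7, "dan": 10}

mutual = ride_a_ratings.keys() & ride_b_ratings.keys()               # {"alice", "bob"}
a_wins = sum(ride_a_ratings[u] > ride_b_ratings[u] for u in mutual)  # 1 (alice)
b_wins = sum(ride_b_ratings[u] > ride_a_ratings[u] for u in mutual)  # 0
ties = len(mutual) - a_wins - b_wins                                 # 1 (bob)

# Winning percentage for ride_a in this single pairing:
winning_pct = (a_wins + 0.5 * ties) / len(mutual)                    # (1 + 0.5) / 2 = 0.75
print(a_wins, b_wins, ties, winning_pct)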
@@ -53,6 +53,23 @@ urlpatterns = [
         views.get_search_suggestions,
         name="search_suggestions",
     ),
+    # Ranking endpoints
+    path("rankings/", views.RideRankingsView.as_view(), name="rankings"),
+    path(
+        "rankings/<slug:ride_slug>/",
+        views.RideRankingDetailView.as_view(),
+        name="ranking_detail",
+    ),
+    path(
+        "rankings/<slug:ride_slug>/history-chart/",
+        views.ranking_history_chart,
+        name="ranking_history_chart",
+    ),
+    path(
+        "rankings/<slug:ride_slug>/comparisons/",
+        views.ranking_comparisons,
+        name="ranking_comparisons",
+    ),
     # Park-specific URLs
     path("create/", views.RideCreateView.as_view(), name="ride_create"),
     path("<slug:ride_slug>/", views.RideDetailView.as_view(), name="ride_detail"),
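A quick way to sanity-check the new routes from a shell. The "rides" URL namespace and the example slug are assumptions; only the route names and the <slug:ride_slug> converter come from this hunk.

from django.urls import reverse

print(reverse("rides:rankings"))
print(reverse("rides:ranking_detail", kwargs={"ride_slug": "steel-vengeance"}))
print(reverse("rides:ranking_history_chart", kwargs={"ride_slug": "steel-vengeance"}))
print(reverse("rides:ranking_comparisons", kwargs={"ride_slug": "steel-vengeance"}))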
@@ -12,6 +12,9 @@ from .forms import RideForm, RideSearchForm
 from apps.parks.models import Park
 from apps.moderation.mixins import EditSubmissionMixin, HistoryMixin
 from apps.moderation.models import EditSubmission
+from .models import CATEGORY_CHOICES
+from .models.rankings import RideRanking, RankingSnapshot
+from .services.ranking_service import RideRankingService


 class ParkContextRequired:
@@ -452,3 +455,166 @@ class RideSearchView(ListView):
         context = super().get_context_data(**kwargs)
         context["search_form"] = RideSearchForm(self.request.GET)
         return context
+
+
+class RideRankingsView(ListView):
+    """View for displaying ride rankings using the Internet Roller Coaster Poll algorithm."""
+
+    model = RideRanking
+    template_name = "rides/rankings.html"
+    context_object_name = "rankings"
+    paginate_by = 50
+
+    def get_queryset(self):
+        """Get rankings with optimized queries."""
+        queryset = RideRanking.objects.select_related(
+            "ride", "ride__park", "ride__manufacturer", "ride__ride_model"
+        ).order_by("rank")
+
+        # Filter by category if specified
+        category = self.request.GET.get("category")
+        if category and category != "all":
+            queryset = queryset.filter(ride__category=category)
+
+        # Filter by minimum mutual riders
+        min_riders = self.request.GET.get("min_riders")
+        if min_riders:
+            try:
+                min_riders = int(min_riders)
+                queryset = queryset.filter(mutual_riders_count__gte=min_riders)
+            except ValueError:
+                pass
+
+        return queryset
+
+    def get_template_names(self):
+        """Return appropriate template based on request type."""
+        if self.request.htmx:
+            return ["rides/partials/rankings_table.html"]
+        return [self.template_name]
+
+    def get_context_data(self, **kwargs):
+        """Add context for rankings view."""
+        context = super().get_context_data(**kwargs)
+        context["category_choices"] = CATEGORY_CHOICES
+        context["selected_category"] = self.request.GET.get("category", "all")
+        context["min_riders"] = self.request.GET.get("min_riders", "")
+
+        # Add statistics
+        if self.object_list:
+            context["total_ranked"] = RideRanking.objects.count()
+            context["last_updated"] = (
+                self.object_list[0].last_calculated if self.object_list else None
+            )
+
+        return context
+
+
+class RideRankingDetailView(DetailView):
+    """View for displaying detailed ranking information for a specific ride."""
+
+    model = Ride
+    template_name = "rides/ranking_detail.html"
+    slug_url_kwarg = "ride_slug"
+
+    def get_queryset(self):
+        """Get ride with ranking data."""
+        return Ride.objects.select_related(
+            "park", "manufacturer", "ranking"
+        ).prefetch_related("comparisons_as_a", "comparisons_as_b", "ranking_history")
+
+    def get_context_data(self, **kwargs):
+        """Add ranking details to context."""
+        context = super().get_context_data(**kwargs)
+
+        # Get ranking details from service
+        service = RideRankingService()
+        ranking_details = service.get_ride_ranking_details(self.object)
+
+        if ranking_details:
+            context.update(ranking_details)
+
+            # Get recent movement
+            recent_snapshots = RankingSnapshot.objects.filter(
+                ride=self.object
+            ).order_by("-snapshot_date")[:7]
+
+            if len(recent_snapshots) >= 2:
+                context["rank_change"] = (
+                    recent_snapshots[0].rank - recent_snapshots[1].rank
+                )
+                context["previous_rank"] = recent_snapshots[1].rank
+        else:
+            context["not_ranked"] = True
+
+        return context
+
+
+def ranking_history_chart(request: HttpRequest, ride_slug: str) -> HttpResponse:
+    """HTMX endpoint for ranking history chart data."""
+    ride = get_object_or_404(Ride, slug=ride_slug)
+
+    # Get last 30 days of ranking history
+    history = RankingSnapshot.objects.filter(ride=ride).order_by("-snapshot_date")[:30]
+
+    # Prepare data for chart
+    chart_data = [
+        {
+            "date": snapshot.snapshot_date.isoformat(),
+            "rank": snapshot.rank,
+            "win_pct": float(snapshot.winning_percentage) * 100,
+        }
+        for snapshot in reversed(history)
+    ]
+
+    return render(
+        request,
+        "rides/partials/ranking_chart.html",
+        {"chart_data": chart_data, "ride": ride},
+    )
+
+
+def ranking_comparisons(request: HttpRequest, ride_slug: str) -> HttpResponse:
+    """HTMX endpoint for ride head-to-head comparisons."""
+    ride = get_object_or_404(Ride, slug=ride_slug)
+
+    # Get head-to-head comparisons
+    from django.db.models import Q
+    from .models.rankings import RidePairComparison
+
+    comparisons = (
+        RidePairComparison.objects.filter(Q(ride_a=ride) | Q(ride_b=ride))
+        .select_related("ride_a", "ride_b", "ride_a__park", "ride_b__park")
+        .order_by("-mutual_riders_count")[:20]
+    )
+
+    # Format comparisons for display
+    comparison_data = []
+    for comp in comparisons:
+        if comp.ride_a == ride:
+            opponent = comp.ride_b
+            wins = comp.ride_a_wins
+            losses = comp.ride_b_wins
+        else:
+            opponent = comp.ride_a
+            wins = comp.ride_b_wins
+            losses = comp.ride_a_wins
+
+        result = "win" if wins > losses else "loss" if losses > wins else "tie"
+
+        comparison_data.append(
+            {
+                "opponent": opponent,
+                "wins": wins,
+                "losses": losses,
+                "ties": comp.ties,
+                "result": result,
+                "mutual_riders": comp.mutual_riders_count,
+            }
+        )
+
+    return render(
+        request,
+        "rides/partials/ranking_comparisons.html",
+        {"comparisons": comparison_data, "ride": ride},
+    )
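The views above only read RideRanking rows, so something still has to run the calculation periodically. A sketch of wiring RideRankingService into a management command follows; the command itself is not part of this commit, and only update_all_rankings() and its category argument come from the service above.

from django.core.management.base import BaseCommand

from apps.rides.services import RideRankingService


class Command(BaseCommand):
    help = "Recalculate ride rankings via pairwise comparisons"

    def add_arguments(self, parser):
        parser.add_argument("--category", default=None, help="Optional ride category, e.g. RC")

    def handle(self, *args, **options):
        # update_all_rankings runs inside a transaction and returns run statistics
        stats = RideRankingService().update_all_rankings(category=options["category"])
        self.stdout.write(self.style.SUCCESS(f"Ranking run: {stats['status']}"))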