Mirror of https://github.com/pacnpal/thrilltrack-explorer.git (synced 2025-12-20 10:11:13 -05:00)
Implement ML Anomaly Detection
Introduce statistical anomaly detection for metrics via an edge function, hooks, and UI components. Add detection algorithms (z-score, moving average, rate of change), anomaly storage, auto-created alerts, and dashboard rendering of detected anomalies, with a run-once trigger and scheduling guidance.
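As a usage sketch (not part of this commit), a component could feed the pipeline through the hooks added below: record a sample into metric_time_series with useRecordMetric, then start an on-demand pass with useRunAnomalyDetection, which invokes the detect-anomalies edge function. The probe component, metric name, and value here are illustrative assumptions; the metric must match a row in anomaly_detection_config to be analyzed.

import { useRecordMetric, useRunAnomalyDetection } from '@/hooks/admin/useAnomalyDetection';

// Hypothetical probe component, for illustration only.
export function MetricProbeButton() {
  const recordMetric = useRecordMetric();
  const runDetection = useRunAnomalyDetection();

  const handleClick = () => {
    // Record one data point (metric name/value are assumed; 'error_rate' is seeded by the migration below).
    recordMetric.mutate({
      metricName: 'error_rate',
      metricCategory: 'system',
      metricValue: 0.42,
    });
    // One-off detection pass over all enabled metric configurations.
    runDetection.mutate();
  };

  return <button onClick={handleClick}>Record metric & run detection</button>;
}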
169 src/components/admin/AnomalyDetectionPanel.tsx Normal file
@@ -0,0 +1,169 @@
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card';
import { Button } from '@/components/ui/button';
import { Badge } from '@/components/ui/badge';
import { Brain, TrendingUp, TrendingDown, Activity, AlertTriangle, Play, Sparkles } from 'lucide-react';
import { formatDistanceToNow } from 'date-fns';
import type { AnomalyDetection } from '@/hooks/admin/useAnomalyDetection';
import { useRunAnomalyDetection } from '@/hooks/admin/useAnomalyDetection';

interface AnomalyDetectionPanelProps {
  anomalies?: AnomalyDetection[];
  isLoading: boolean;
}

const ANOMALY_TYPE_CONFIG = {
  spike: { icon: TrendingUp, label: 'Spike', color: 'text-orange-500' },
  drop: { icon: TrendingDown, label: 'Drop', color: 'text-blue-500' },
  trend_change: { icon: Activity, label: 'Trend Change', color: 'text-purple-500' },
  outlier: { icon: AlertTriangle, label: 'Outlier', color: 'text-yellow-500' },
  pattern_break: { icon: Activity, label: 'Pattern Break', color: 'text-red-500' },
};

const SEVERITY_CONFIG = {
  critical: { badge: 'destructive', label: 'Critical' },
  high: { badge: 'default', label: 'High' },
  medium: { badge: 'secondary', label: 'Medium' },
  low: { badge: 'outline', label: 'Low' },
};

export function AnomalyDetectionPanel({ anomalies, isLoading }: AnomalyDetectionPanelProps) {
  const runDetection = useRunAnomalyDetection();

  const handleRunDetection = () => {
    runDetection.mutate();
  };

  if (isLoading) {
    return (
      <Card>
        <CardHeader>
          <CardTitle className="flex items-center gap-2">
            <Brain className="h-5 w-5" />
            ML Anomaly Detection
          </CardTitle>
          <CardDescription>Loading anomaly data...</CardDescription>
        </CardHeader>
        <CardContent>
          <div className="flex items-center justify-center py-8">
            <div className="animate-spin rounded-full h-8 w-8 border-b-2 border-primary"></div>
          </div>
        </CardContent>
      </Card>
    );
  }

  const recentAnomalies = anomalies?.slice(0, 5) || [];

  return (
    <Card>
      <CardHeader>
        <CardTitle className="flex items-center justify-between">
          <span className="flex items-center gap-2">
            <Brain className="h-5 w-5" />
            ML Anomaly Detection
          </span>
          <div className="flex items-center gap-2">
            {anomalies && anomalies.length > 0 && (
              <span className="text-sm font-normal text-muted-foreground">
                {anomalies.length} detected (24h)
              </span>
            )}
            <Button
              variant="outline"
              size="sm"
              onClick={handleRunDetection}
              disabled={runDetection.isPending}
            >
              <Play className="h-4 w-4 mr-1" />
              Run Detection
            </Button>
          </div>
        </CardTitle>
        <CardDescription>
          Statistical ML algorithms detecting unusual patterns in metrics
        </CardDescription>
      </CardHeader>
      <CardContent className="space-y-3">
        {recentAnomalies.length === 0 ? (
          <div className="flex flex-col items-center justify-center py-8 text-muted-foreground">
            <Sparkles className="h-12 w-12 mb-2 opacity-50" />
            <p>No anomalies detected in last 24 hours</p>
            <p className="text-sm">ML models are monitoring metrics continuously</p>
          </div>
        ) : (
          <>
            {recentAnomalies.map((anomaly) => {
              const typeConfig = ANOMALY_TYPE_CONFIG[anomaly.anomaly_type];
              const severityConfig = SEVERITY_CONFIG[anomaly.severity];
              const TypeIcon = typeConfig.icon;

              return (
                <div
                  key={anomaly.id}
                  className="border rounded-lg p-4 space-y-2 bg-card hover:bg-accent/5 transition-colors"
                >
                  <div className="flex items-start justify-between gap-4">
                    <div className="flex items-start gap-3 flex-1">
                      <TypeIcon className={`h-5 w-5 mt-0.5 ${typeConfig.color}`} />
                      <div className="flex-1 min-w-0">
                        <div className="flex items-center gap-2 flex-wrap mb-1">
                          <Badge variant={severityConfig.badge as any} className="text-xs">
                            {severityConfig.label}
                          </Badge>
                          <span className="text-xs px-2 py-0.5 rounded bg-purple-500/10 text-purple-600">
                            {typeConfig.label}
                          </span>
                          <span className="text-xs px-2 py-0.5 rounded bg-muted text-muted-foreground">
                            {anomaly.metric_name.replace(/_/g, ' ')}
                          </span>
                          {anomaly.alert_created && (
                            <span className="text-xs px-2 py-0.5 rounded bg-green-500/10 text-green-600">
                              Alert Created
                            </span>
                          )}
                        </div>
                        <div className="text-sm space-y-1">
                          <div className="flex items-center gap-4 text-muted-foreground">
                            <span>
                              Baseline: <span className="font-medium text-foreground">{anomaly.baseline_value.toFixed(2)}</span>
                            </span>
                            <span>→</span>
                            <span>
                              Detected: <span className="font-medium text-foreground">{anomaly.anomaly_value.toFixed(2)}</span>
                            </span>
                            <span className="ml-2 px-2 py-0.5 rounded bg-orange-500/10 text-orange-600 text-xs font-medium">
                              {anomaly.deviation_score.toFixed(2)}σ
                            </span>
                          </div>
                          <div className="flex items-center gap-4 text-xs text-muted-foreground">
                            <span className="flex items-center gap-1">
                              <Brain className="h-3 w-3" />
                              Algorithm: {anomaly.detection_algorithm.replace(/_/g, ' ')}
                            </span>
                            <span>
                              Confidence: {(anomaly.confidence_score * 100).toFixed(0)}%
                            </span>
                            <span>
                              Detected {formatDistanceToNow(new Date(anomaly.detected_at), { addSuffix: true })}
                            </span>
                          </div>
                        </div>
                      </div>
                    </div>
                  </div>
                </div>
              );
            })}
            {anomalies && anomalies.length > 5 && (
              <div className="text-center pt-2">
                <span className="text-sm text-muted-foreground">
                  + {anomalies.length - 5} more anomalies
                </span>
              </div>
            )}
          </>
        )}
      </CardContent>
    </Card>
  );
}
101 src/hooks/admin/useAnomalyDetection.ts Normal file
@@ -0,0 +1,101 @@
import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query';
import { supabase } from '@/lib/supabaseClient';
import { queryKeys } from '@/lib/queryKeys';
import { toast } from 'sonner';

export interface AnomalyDetection {
  id: string;
  metric_name: string;
  metric_category: string;
  anomaly_type: 'spike' | 'drop' | 'trend_change' | 'outlier' | 'pattern_break';
  severity: 'critical' | 'high' | 'medium' | 'low';
  baseline_value: number;
  anomaly_value: number;
  deviation_score: number;
  confidence_score: number;
  detection_algorithm: string;
  time_window_start: string;
  time_window_end: string;
  detected_at: string;
  alert_created: boolean;
  alert_id?: string;
  alert_message?: string;
  alert_resolved_at?: string;
}

export function useAnomalyDetections() {
  return useQuery({
    queryKey: queryKeys.monitoring.anomalyDetections(),
    queryFn: async () => {
      const { data, error } = await supabase
        .from('recent_anomalies_view')
        .select('*')
        .order('detected_at', { ascending: false })
        .limit(50);

      if (error) throw error;
      return (data || []) as AnomalyDetection[];
    },
    staleTime: 30000,
    refetchInterval: 60000,
  });
}

export function useRunAnomalyDetection() {
  const queryClient = useQueryClient();

  return useMutation({
    mutationFn: async () => {
      const { data, error } = await supabase.functions.invoke('detect-anomalies', {
        method: 'POST',
      });

      if (error) throw error;
      return data;
    },
    onSuccess: (data) => {
      queryClient.invalidateQueries({ queryKey: queryKeys.monitoring.anomalyDetections() });
      queryClient.invalidateQueries({ queryKey: queryKeys.monitoring.groupedAlerts() });

      if (data.anomalies_detected > 0) {
        toast.success(`Detected ${data.anomalies_detected} anomalies`);
      } else {
        toast.info('No anomalies detected');
      }
    },
    onError: (error) => {
      console.error('Failed to run anomaly detection:', error);
      toast.error('Failed to run anomaly detection');
    },
  });
}

export function useRecordMetric() {
  return useMutation({
    mutationFn: async ({
      metricName,
      metricCategory,
      metricValue,
      metadata,
    }: {
      metricName: string;
      metricCategory: string;
      metricValue: number;
      metadata?: any;
    }) => {
      const { error } = await supabase
        .from('metric_time_series')
        .insert({
          metric_name: metricName,
          metric_category: metricCategory,
          metric_value: metricValue,
          metadata,
        });

      if (error) throw error;
    },
    onError: (error) => {
      console.error('Failed to record metric:', error);
    },
  });
}
@@ -202,6 +202,111 @@ export type Database = {
        }
        Relationships: []
      }
      anomaly_detection_config: {
        Row: {
          alert_threshold_score: number
          auto_create_alert: boolean
          created_at: string
          detection_algorithms: string[]
          enabled: boolean
          id: string
          lookback_window_minutes: number
          metric_category: string
          metric_name: string
          min_data_points: number
          sensitivity: number
          updated_at: string
        }
        Insert: {
          alert_threshold_score?: number
          auto_create_alert?: boolean
          created_at?: string
          detection_algorithms?: string[]
          enabled?: boolean
          id?: string
          lookback_window_minutes?: number
          metric_category: string
          metric_name: string
          min_data_points?: number
          sensitivity?: number
          updated_at?: string
        }
        Update: {
          alert_threshold_score?: number
          auto_create_alert?: boolean
          created_at?: string
          detection_algorithms?: string[]
          enabled?: boolean
          id?: string
          lookback_window_minutes?: number
          metric_category?: string
          metric_name?: string
          min_data_points?: number
          sensitivity?: number
          updated_at?: string
        }
        Relationships: []
      }
      anomaly_detections: {
        Row: {
          alert_created: boolean
          alert_id: string | null
          anomaly_type: string
          anomaly_value: number
          baseline_value: number
          confidence_score: number
          created_at: string
          detected_at: string
          detection_algorithm: string
          deviation_score: number
          id: string
          metadata: Json | null
          metric_category: string
          metric_name: string
          severity: string
          time_window_end: string
          time_window_start: string
        }
        Insert: {
          alert_created?: boolean
          alert_id?: string | null
          anomaly_type: string
          anomaly_value: number
          baseline_value: number
          confidence_score: number
          created_at?: string
          detected_at?: string
          detection_algorithm: string
          deviation_score: number
          id?: string
          metadata?: Json | null
          metric_category: string
          metric_name: string
          severity: string
          time_window_end: string
          time_window_start: string
        }
        Update: {
          alert_created?: boolean
          alert_id?: string | null
          anomaly_type?: string
          anomaly_value?: number
          baseline_value?: number
          confidence_score?: number
          created_at?: string
          detected_at?: string
          detection_algorithm?: string
          deviation_score?: number
          id?: string
          metadata?: Json | null
          metric_category?: string
          metric_name?: string
          severity?: string
          time_window_end?: string
          time_window_start?: string
        }
        Relationships: []
      }
      approval_transaction_metrics: {
        Row: {
          created_at: string | null
@@ -1894,6 +1999,36 @@ export type Database = {
        }
        Relationships: []
      }
      metric_time_series: {
        Row: {
          created_at: string
          id: string
          metadata: Json | null
          metric_category: string
          metric_name: string
          metric_value: number
          timestamp: string
        }
        Insert: {
          created_at?: string
          id?: string
          metadata?: Json | null
          metric_category: string
          metric_name: string
          metric_value: number
          timestamp?: string
        }
        Update: {
          created_at?: string
          id?: string
          metadata?: Json | null
          metric_category?: string
          metric_name?: string
          metric_value?: number
          timestamp?: string
        }
        Relationships: []
      }
      moderation_audit_log: {
        Row: {
          action: string
@@ -6270,6 +6405,28 @@ export type Database = {
        }
        Relationships: []
      }
      recent_anomalies_view: {
        Row: {
          alert_created: boolean | null
          alert_id: string | null
          alert_message: string | null
          alert_resolved_at: string | null
          anomaly_type: string | null
          anomaly_value: number | null
          baseline_value: number | null
          confidence_score: number | null
          detected_at: string | null
          detection_algorithm: string | null
          deviation_score: number | null
          id: string | null
          metric_category: string | null
          metric_name: string | null
          severity: string | null
          time_window_end: string | null
          time_window_start: string | null
        }
        Relationships: []
      }
    }
    Functions: {
      anonymize_user_submissions: {
@@ -95,5 +95,6 @@ export const queryKeys = {
    correlatedAlerts: () => ['monitoring', 'correlated-alerts'] as const,
    incidents: (status?: string) => ['monitoring', 'incidents', status] as const,
    incidentDetails: (incidentId: string) => ['monitoring', 'incident-details', incidentId] as const,
    anomalyDetections: () => ['monitoring', 'anomaly-detections'] as const,
  },
} as const;
@@ -6,6 +6,7 @@ import { SystemHealthStatus } from '@/components/admin/SystemHealthStatus';
import { GroupedAlertsPanel } from '@/components/admin/GroupedAlertsPanel';
import { CorrelatedAlertsPanel } from '@/components/admin/CorrelatedAlertsPanel';
import { IncidentsPanel } from '@/components/admin/IncidentsPanel';
import { AnomalyDetectionPanel } from '@/components/admin/AnomalyDetectionPanel';
import { MonitoringQuickStats } from '@/components/admin/MonitoringQuickStats';
import { RecentActivityTimeline } from '@/components/admin/RecentActivityTimeline';
import { MonitoringNavCards } from '@/components/admin/MonitoringNavCards';
@@ -13,6 +14,7 @@ import { useSystemHealth } from '@/hooks/useSystemHealth';
import { useGroupedAlerts } from '@/hooks/admin/useGroupedAlerts';
import { useCorrelatedAlerts } from '@/hooks/admin/useCorrelatedAlerts';
import { useIncidents } from '@/hooks/admin/useIncidents';
import { useAnomalyDetections } from '@/hooks/admin/useAnomalyDetection';
import { useRecentActivity } from '@/hooks/admin/useRecentActivity';
import { useDatabaseHealth } from '@/hooks/admin/useDatabaseHealth';
import { useModerationHealth } from '@/hooks/admin/useModerationHealth';
@@ -30,6 +32,7 @@ export default function MonitoringOverview() {
  const groupedAlerts = useGroupedAlerts({ includeResolved: false });
  const correlatedAlerts = useCorrelatedAlerts();
  const incidents = useIncidents('open');
  const anomalies = useAnomalyDetections();
  const recentActivity = useRecentActivity(3600000); // 1 hour
  const dbHealth = useDatabaseHealth();
  const moderationHealth = useModerationHealth();
@@ -40,6 +43,7 @@ export default function MonitoringOverview() {
    groupedAlerts.isLoading ||
    correlatedAlerts.isLoading ||
    incidents.isLoading ||
    anomalies.isLoading ||
    recentActivity.isLoading ||
    dbHealth.isLoading ||
    moderationHealth.isLoading ||
@@ -74,6 +78,10 @@ export default function MonitoringOverview() {
      queryKey: queryKeys.monitoring.incidents(),
      refetchType: 'active'
    });
    await queryClient.invalidateQueries({
      queryKey: queryKeys.monitoring.anomalyDetections(),
      refetchType: 'active'
    });
  };

  // Calculate error count for nav card (from recent activity)
@@ -136,6 +144,12 @@ export default function MonitoringOverview() {
        isLoading={incidents.isLoading}
      />

      {/* ML Anomaly Detection */}
      <AnomalyDetectionPanel
        anomalies={anomalies.data}
        isLoading={anomalies.isLoading}
      />

      {/* Quick Stats Grid */}
      <MonitoringQuickStats
        systemHealth={systemHealth.data ?? undefined}
302 supabase/functions/detect-anomalies/index.ts Normal file
@@ -0,0 +1,302 @@
import { createClient } from 'https://esm.sh/@supabase/supabase-js@2.57.4';

const corsHeaders = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type',
};

interface MetricData {
  timestamp: string;
  metric_value: number;
}

interface AnomalyDetectionConfig {
  metric_name: string;
  metric_category: string;
  enabled: boolean;
  sensitivity: number;
  lookback_window_minutes: number;
  detection_algorithms: string[];
  min_data_points: number;
  alert_threshold_score: number;
  auto_create_alert: boolean;
}

interface AnomalyResult {
  isAnomaly: boolean;
  anomalyType: string;
  deviationScore: number;
  confidenceScore: number;
  algorithm: string;
  baselineValue: number;
  anomalyValue: number;
}

// Statistical anomaly detection algorithms
class AnomalyDetector {
  // Z-Score algorithm: Detects outliers based on standard deviation
  static zScore(data: number[], currentValue: number, sensitivity: number = 3.0): AnomalyResult {
    if (data.length < 2) {
      return { isAnomaly: false, anomalyType: 'none', deviationScore: 0, confidenceScore: 0, algorithm: 'z_score', baselineValue: currentValue, anomalyValue: currentValue };
    }

    const mean = data.reduce((sum, val) => sum + val, 0) / data.length;
    const variance = data.reduce((sum, val) => sum + Math.pow(val - mean, 2), 0) / data.length;
    const stdDev = Math.sqrt(variance);

    if (stdDev === 0) {
      return { isAnomaly: false, anomalyType: 'none', deviationScore: 0, confidenceScore: 0, algorithm: 'z_score', baselineValue: mean, anomalyValue: currentValue };
    }

    const zScore = Math.abs((currentValue - mean) / stdDev);
    const isAnomaly = zScore > sensitivity;

    return {
      isAnomaly,
      anomalyType: currentValue > mean ? 'spike' : 'drop',
      deviationScore: zScore,
      confidenceScore: Math.min(zScore / (sensitivity * 2), 1),
      algorithm: 'z_score',
      baselineValue: mean,
      anomalyValue: currentValue,
    };
  }

  // Moving Average algorithm: Detects deviation from trend
  static movingAverage(data: number[], currentValue: number, sensitivity: number = 2.5, window: number = 10): AnomalyResult {
    if (data.length < window) {
      return { isAnomaly: false, anomalyType: 'none', deviationScore: 0, confidenceScore: 0, algorithm: 'moving_average', baselineValue: currentValue, anomalyValue: currentValue };
    }

    const recentData = data.slice(-window);
    const ma = recentData.reduce((sum, val) => sum + val, 0) / recentData.length;

    const mad = recentData.reduce((sum, val) => sum + Math.abs(val - ma), 0) / recentData.length;

    if (mad === 0) {
      return { isAnomaly: false, anomalyType: 'none', deviationScore: 0, confidenceScore: 0, algorithm: 'moving_average', baselineValue: ma, anomalyValue: currentValue };
    }

    const deviation = Math.abs(currentValue - ma) / mad;
    const isAnomaly = deviation > sensitivity;

    return {
      isAnomaly,
      anomalyType: currentValue > ma ? 'spike' : 'drop',
      deviationScore: deviation,
      confidenceScore: Math.min(deviation / (sensitivity * 2), 1),
      algorithm: 'moving_average',
      baselineValue: ma,
      anomalyValue: currentValue,
    };
  }

  // Rate of Change algorithm: Detects sudden changes
  static rateOfChange(data: number[], currentValue: number, sensitivity: number = 3.0): AnomalyResult {
    if (data.length < 2) {
      return { isAnomaly: false, anomalyType: 'none', deviationScore: 0, confidenceScore: 0, algorithm: 'rate_of_change', baselineValue: currentValue, anomalyValue: currentValue };
    }

    const previousValue = data[data.length - 1];

    if (previousValue === 0) {
      return { isAnomaly: false, anomalyType: 'none', deviationScore: 0, confidenceScore: 0, algorithm: 'rate_of_change', baselineValue: previousValue, anomalyValue: currentValue };
    }

    const percentChange = Math.abs((currentValue - previousValue) / previousValue) * 100;
    const isAnomaly = percentChange > (sensitivity * 10); // sensitivity * 10 = % threshold

    return {
      isAnomaly,
      anomalyType: currentValue > previousValue ? 'trend_change' : 'drop',
      deviationScore: percentChange / 10,
      confidenceScore: Math.min(percentChange / (sensitivity * 20), 1),
      algorithm: 'rate_of_change',
      baselineValue: previousValue,
      anomalyValue: currentValue,
    };
  }
}

Deno.serve(async (req) => {
  if (req.method === 'OPTIONS') {
    return new Response(null, { headers: corsHeaders });
  }

  try {
    const supabaseUrl = Deno.env.get('SUPABASE_URL')!;
    const supabaseKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')!;
    const supabase = createClient(supabaseUrl, supabaseKey);

    console.log('Starting anomaly detection run...');

    // Get all enabled anomaly detection configurations
    const { data: configs, error: configError } = await supabase
      .from('anomaly_detection_config')
      .select('*')
      .eq('enabled', true);

    if (configError) {
      console.error('Error fetching configs:', configError);
      throw configError;
    }

    console.log(`Processing ${configs?.length || 0} metric configurations`);

    const anomaliesDetected: any[] = [];

    for (const config of (configs as AnomalyDetectionConfig[])) {
      try {
        // Fetch historical data for this metric
        const windowStart = new Date(Date.now() - config.lookback_window_minutes * 60 * 1000);

        const { data: metricData, error: metricError } = await supabase
          .from('metric_time_series')
          .select('timestamp, metric_value')
          .eq('metric_name', config.metric_name)
          .gte('timestamp', windowStart.toISOString())
          .order('timestamp', { ascending: true });

        if (metricError) {
          console.error(`Error fetching metric data for ${config.metric_name}:`, metricError);
          continue;
        }

        const data = metricData as MetricData[];

        if (!data || data.length < config.min_data_points) {
          console.log(`Insufficient data for ${config.metric_name}: ${data?.length || 0} points`);
          continue;
        }

        // Get current value (most recent)
        const currentValue = data[data.length - 1].metric_value;
        const historicalValues = data.slice(0, -1).map(d => d.metric_value);

        // Run detection algorithms
        const results: AnomalyResult[] = [];

        for (const algorithm of config.detection_algorithms) {
          let result: AnomalyResult;

          switch (algorithm) {
            case 'z_score':
              result = AnomalyDetector.zScore(historicalValues, currentValue, config.sensitivity);
              break;
            case 'moving_average':
              result = AnomalyDetector.movingAverage(historicalValues, currentValue, config.sensitivity);
              break;
            case 'rate_of_change':
              result = AnomalyDetector.rateOfChange(historicalValues, currentValue, config.sensitivity);
              break;
            default:
              continue;
          }

          if (result.isAnomaly && result.deviationScore >= config.alert_threshold_score) {
            results.push(result);
          }
        }

        // If any algorithm detected an anomaly
        if (results.length > 0) {
          // Use the result with highest confidence
          const bestResult = results.reduce((best, current) =>
            current.confidenceScore > best.confidenceScore ? current : best
          );

          // Determine severity based on deviation score
          const severity =
            bestResult.deviationScore >= 5 ? 'critical' :
            bestResult.deviationScore >= 4 ? 'high' :
            bestResult.deviationScore >= 3 ? 'medium' : 'low';

          // Insert anomaly detection record
          const { data: anomaly, error: anomalyError } = await supabase
            .from('anomaly_detections')
            .insert({
              metric_name: config.metric_name,
              metric_category: config.metric_category,
              anomaly_type: bestResult.anomalyType,
              severity,
              baseline_value: bestResult.baselineValue,
              anomaly_value: bestResult.anomalyValue,
              deviation_score: bestResult.deviationScore,
              confidence_score: bestResult.confidenceScore,
              detection_algorithm: bestResult.algorithm,
              time_window_start: windowStart.toISOString(),
              time_window_end: new Date().toISOString(),
              metadata: {
                algorithms_run: config.detection_algorithms,
                total_data_points: data.length,
                sensitivity: config.sensitivity,
              },
            })
            .select()
            .single();

          if (anomalyError) {
            console.error(`Error inserting anomaly for ${config.metric_name}:`, anomalyError);
            continue;
          }

          anomaliesDetected.push(anomaly);

          // Auto-create alert if configured
          if (config.auto_create_alert && ['critical', 'high'].includes(severity)) {
            const { data: alert, error: alertError } = await supabase
              .from('system_alerts')
              .insert({
                alert_type: 'anomaly_detected',
                severity,
                message: `Anomaly detected in ${config.metric_name}: ${bestResult.anomalyType} (${bestResult.deviationScore.toFixed(2)}σ deviation)`,
                metadata: {
                  anomaly_id: anomaly.id,
                  metric_name: config.metric_name,
                  baseline_value: bestResult.baselineValue,
                  anomaly_value: bestResult.anomalyValue,
                  algorithm: bestResult.algorithm,
                },
              })
              .select()
              .single();

            if (!alertError && alert) {
              // Update anomaly with alert_id
              await supabase
                .from('anomaly_detections')
                .update({ alert_created: true, alert_id: alert.id })
                .eq('id', anomaly.id);

              console.log(`Created alert for anomaly in ${config.metric_name}`);
            }
          }

          console.log(`Anomaly detected: ${config.metric_name} - ${bestResult.anomalyType} (${bestResult.deviationScore.toFixed(2)}σ)`);
        }
      } catch (error) {
        console.error(`Error processing metric ${config.metric_name}:`, error);
      }
    }

    console.log(`Anomaly detection complete. Detected ${anomaliesDetected.length} anomalies`);

    return new Response(
      JSON.stringify({
        success: true,
        anomalies_detected: anomaliesDetected.length,
        anomalies: anomaliesDetected,
      }),
      { headers: { ...corsHeaders, 'Content-Type': 'application/json' } }
    );
  } catch (error) {
    console.error('Error in detect-anomalies function:', error);
    return new Response(
      JSON.stringify({ error: error instanceof Error ? error.message : String(error) }),
      {
        status: 500,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' },
      }
    );
  }
});
@@ -0,0 +1,143 @@
-- ML-based Anomaly Detection System

-- Table: Time-series metrics for anomaly detection
CREATE TABLE metric_time_series (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  metric_name TEXT NOT NULL,
  metric_category TEXT NOT NULL CHECK (metric_category IN ('system', 'database', 'rate_limit', 'moderation', 'api')),
  metric_value NUMERIC NOT NULL,
  timestamp TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  metadata JSONB,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Table: Detected anomalies
CREATE TABLE anomaly_detections (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  metric_name TEXT NOT NULL,
  metric_category TEXT NOT NULL,
  anomaly_type TEXT NOT NULL CHECK (anomaly_type IN ('spike', 'drop', 'trend_change', 'outlier', 'pattern_break')),
  severity TEXT NOT NULL CHECK (severity IN ('critical', 'high', 'medium', 'low')),
  baseline_value NUMERIC NOT NULL,
  anomaly_value NUMERIC NOT NULL,
  deviation_score NUMERIC NOT NULL,
  confidence_score NUMERIC NOT NULL CHECK (confidence_score >= 0 AND confidence_score <= 1),
  detection_algorithm TEXT NOT NULL,
  time_window_start TIMESTAMPTZ NOT NULL,
  time_window_end TIMESTAMPTZ NOT NULL,
  detected_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  alert_created BOOLEAN NOT NULL DEFAULT false,
  alert_id UUID,
  metadata JSONB,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Table: Anomaly detection configuration
CREATE TABLE anomaly_detection_config (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  metric_name TEXT NOT NULL UNIQUE,
  metric_category TEXT NOT NULL,
  enabled BOOLEAN NOT NULL DEFAULT true,
  sensitivity NUMERIC NOT NULL DEFAULT 3.0 CHECK (sensitivity > 0),
  lookback_window_minutes INTEGER NOT NULL DEFAULT 60,
  detection_algorithms TEXT[] NOT NULL DEFAULT ARRAY['z_score', 'moving_average', 'rate_of_change'],
  min_data_points INTEGER NOT NULL DEFAULT 10,
  alert_threshold_score NUMERIC NOT NULL DEFAULT 2.5,
  auto_create_alert BOOLEAN NOT NULL DEFAULT true,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- View: Recent anomalies with alert status
CREATE OR REPLACE VIEW recent_anomalies_view
WITH (security_invoker=on)
AS
SELECT
  ad.id,
  ad.metric_name,
  ad.metric_category,
  ad.anomaly_type,
  ad.severity,
  ad.baseline_value,
  ad.anomaly_value,
  ad.deviation_score,
  ad.confidence_score,
  ad.detection_algorithm,
  ad.time_window_start,
  ad.time_window_end,
  ad.detected_at,
  ad.alert_created,
  ad.alert_id,
  sa.message as alert_message,
  sa.resolved_at as alert_resolved_at
FROM anomaly_detections ad
LEFT JOIN system_alerts sa ON sa.id = ad.alert_id::uuid
WHERE ad.detected_at > NOW() - INTERVAL '24 hours'
ORDER BY ad.detected_at DESC;

-- Insert default anomaly detection configurations
INSERT INTO anomaly_detection_config (metric_name, metric_category, sensitivity, lookback_window_minutes, detection_algorithms, alert_threshold_score) VALUES
  ('error_rate', 'system', 2.5, 60, ARRAY['z_score', 'moving_average'], 2.0),
  ('response_time', 'api', 3.0, 30, ARRAY['z_score', 'rate_of_change'], 2.5),
  ('database_connections', 'database', 2.0, 120, ARRAY['z_score', 'moving_average'], 3.0),
  ('rate_limit_violations', 'rate_limit', 2.5, 60, ARRAY['z_score', 'spike_detection'], 2.0),
  ('moderation_queue_size', 'moderation', 3.0, 120, ARRAY['z_score', 'trend_change'], 2.5),
  ('cpu_usage', 'system', 2.5, 30, ARRAY['z_score', 'moving_average'], 2.0),
  ('memory_usage', 'system', 2.5, 30, ARRAY['z_score', 'moving_average'], 2.0),
  ('request_rate', 'api', 3.0, 60, ARRAY['z_score', 'rate_of_change'], 2.5);

-- Create indexes
CREATE INDEX idx_metric_time_series_name_timestamp ON metric_time_series(metric_name, timestamp DESC);
CREATE INDEX idx_metric_time_series_category_timestamp ON metric_time_series(metric_category, timestamp DESC);
CREATE INDEX idx_anomaly_detections_detected_at ON anomaly_detections(detected_at DESC);
CREATE INDEX idx_anomaly_detections_alert_created ON anomaly_detections(alert_created) WHERE alert_created = false;
CREATE INDEX idx_anomaly_detections_metric ON anomaly_detections(metric_name, detected_at DESC);

-- Grant permissions
GRANT SELECT, INSERT ON metric_time_series TO authenticated;
GRANT SELECT ON anomaly_detections TO authenticated;
GRANT SELECT ON anomaly_detection_config TO authenticated;
GRANT SELECT ON recent_anomalies_view TO authenticated;

-- RLS Policies
ALTER TABLE metric_time_series ENABLE ROW LEVEL SECURITY;
ALTER TABLE anomaly_detections ENABLE ROW LEVEL SECURITY;
ALTER TABLE anomaly_detection_config ENABLE ROW LEVEL SECURITY;

-- System can insert metrics
CREATE POLICY system_insert_metrics ON metric_time_series
  FOR INSERT WITH CHECK (true);

-- Moderators can view all metrics
CREATE POLICY moderators_view_metrics ON metric_time_series
  FOR SELECT USING (
    EXISTS (
      SELECT 1 FROM user_roles
      WHERE user_id = auth.uid()
      AND role IN ('moderator', 'admin', 'superuser')
    )
  );

-- Moderators can view anomalies
CREATE POLICY moderators_view_anomalies ON anomaly_detections
  FOR SELECT USING (
    EXISTS (
      SELECT 1 FROM user_roles
      WHERE user_id = auth.uid()
      AND role IN ('moderator', 'admin', 'superuser')
    )
  );

-- System can insert anomalies
CREATE POLICY system_insert_anomalies ON anomaly_detections
  FOR INSERT WITH CHECK (true);

-- Admins can manage anomaly config
CREATE POLICY admins_manage_config ON anomaly_detection_config
  FOR ALL USING (
    EXISTS (
      SELECT 1 FROM user_roles
      WHERE user_id = auth.uid()
      AND role IN ('admin', 'superuser')
    )
  );