mirror of
https://github.com/pacnpal/thrillwiki_django_no_react.git
synced 2025-12-20 11:11:10 -05:00
- Add complete backend/ directory with full Django application
- Add frontend/ directory with Vite + TypeScript setup ready for Next.js
- Add comprehensive shared/ directory with:
  - Complete documentation and memory-bank archives
  - Media files and avatars (letters, park/ride images)
  - Deployment scripts and automation tools
  - Shared types and utilities
- Add architecture/ directory with migration guides
- Configure pnpm workspace for monorepo development
- Update .gitignore to exclude .django_tailwind_cli/ build artifacts
- Preserve all historical documentation in shared/docs/memory-bank/
- Set up proper structure for full-stack development with shared resources
917 lines
28 KiB
Bash
Executable File
#!/usr/bin/env bash
#
# ThrillWiki Step 5B Final Validation Test Script
# Comprehensive testing of final validation and health checks with cross-shell compatibility
#
# Features:
# - Cross-shell compatible (bash/zsh)
# - Comprehensive final validation testing
# - Health check validation
# - Integration testing validation
# - System monitoring validation
# - Cross-shell compatibility testing
# - Deployment preset validation
# - Comprehensive reporting
#

set -e

# =============================================================================
# SCRIPT CONFIGURATION
# =============================================================================

# Cross-shell compatible script directory detection
if [ -n "${BASH_SOURCE:-}" ]; then
    SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    SCRIPT_NAME="$(basename "${BASH_SOURCE[0]}")"
elif [ -n "${ZSH_NAME:-}" ]; then
    SCRIPT_DIR="$(cd "$(dirname "${(%):-%x}")" && pwd)"
    SCRIPT_NAME="$(basename "${(%):-%x}")"
else
    SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
    SCRIPT_NAME="$(basename "$0")"
fi
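
# Note: ${(%):-%x} is zsh prompt expansion yielding the path of the file
# currently being executed or sourced; it is the closest zsh analogue to
# bash's ${BASH_SOURCE[0]}. Illustrative side-by-side (not executed):
#   bash: script_path="${BASH_SOURCE[0]}"
#   zsh:  script_path="${(%):-%x}"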

PROJECT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
DEPLOY_COMPLETE_SCRIPT="$SCRIPT_DIR/deploy-complete.sh"

# Test configuration
TEST_LOG="$PROJECT_DIR/logs/test-step5b-final-validation.log"
TEST_RESULTS_FILE="$PROJECT_DIR/logs/step5b-test-results.txt"

# =============================================================================
# COLOR DEFINITIONS
# =============================================================================
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m' # No Color

# =============================================================================
# LOGGING FUNCTIONS
# =============================================================================

test_log() {
    local level="$1"
    local color="$2"
    local message="$3"
    local timestamp="$(date '+%Y-%m-%d %H:%M:%S')"

    # Ensure log directory exists
    mkdir -p "$(dirname "$TEST_LOG")"

    # Log to file (without colors)
    echo "[$timestamp] [$level] [STEP5B-TEST] $message" >> "$TEST_LOG"

    # Log to console (with colors)
    echo -e "${color}[$timestamp] [STEP5B-TEST-$level]${NC} $message"
}

test_info() {
    test_log "INFO" "$BLUE" "$1"
}

test_success() {
    test_log "SUCCESS" "$GREEN" "✅ $1"
}

test_warning() {
    test_log "WARNING" "$YELLOW" "⚠️ $1"
}

test_error() {
    test_log "ERROR" "$RED" "❌ $1"
}

test_debug() {
    if [ "${TEST_DEBUG:-false}" = "true" ]; then
        test_log "DEBUG" "$PURPLE" "🔍 $1"
    fi
}

test_progress() {
    test_log "PROGRESS" "$CYAN" "🚀 $1"
}
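
# Example (illustrative, not executed): test_debug output is suppressed unless
# TEST_DEBUG=true is exported, e.g. for a single run:
#   TEST_DEBUG=true ./test-step5b-final-validation.sh --test-all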

# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================

# Cross-shell compatible command existence check
command_exists() {
    command -v "$1" >/dev/null 2>&1
}
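
# Example usage (illustrative): guard an optional check on an external tool
#   if command_exists shellcheck; then
#       shellcheck "$DEPLOY_COMPLETE_SCRIPT" || test_warning "shellcheck reported issues"
#   fi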

# Show test banner
show_test_banner() {
    echo ""
    echo -e "${BOLD}${CYAN}"
    echo "╔═══════════════════════════════════════════════════════════════════════════════╗"
    echo "║                                                                               ║"
    echo "║                 🧪 ThrillWiki Step 5B Final Validation Test 🧪                ║"
    echo "║                                                                               ║"
    echo "║          Comprehensive Testing of Final Validation and Health Checks         ║"
    echo "║                                                                               ║"
    echo "╚═══════════════════════════════════════════════════════════════════════════════╝"
    echo -e "${NC}"
    echo ""
}

# Show usage information
show_usage() {
    cat << 'EOF'
🧪 ThrillWiki Step 5B Final Validation Test Script

DESCRIPTION:
    Comprehensive testing of Step 5B final validation and health checks
    with cross-shell compatibility validation.

USAGE:
    ./test-step5b-final-validation.sh [OPTIONS]

OPTIONS:
    --test-validation-functions    Test individual validation functions
    --test-health-checks           Test component health checks
    --test-integration             Test integration testing functions
    --test-monitoring              Test system monitoring functions
    --test-cross-shell             Test cross-shell compatibility
    --test-presets                 Test deployment preset validation
    --test-reporting               Test comprehensive reporting
    --test-all                     Run all tests (default)
    --create-mock-hosts            Create mock host configuration for testing
    --debug                        Enable debug output
    --quiet                        Reduce output verbosity
    -h, --help                     Show this help message

EXAMPLES:
    # Run all tests
    ./test-step5b-final-validation.sh

    # Test only validation functions
    ./test-step5b-final-validation.sh --test-validation-functions

    # Test with debug output
    ./test-step5b-final-validation.sh --debug --test-all

    # Test cross-shell compatibility
    ./test-step5b-final-validation.sh --test-cross-shell

FEATURES:
    ✅ Validation function testing
    ✅ Component health check testing
    ✅ Integration testing validation
    ✅ System monitoring testing
    ✅ Cross-shell compatibility testing
    ✅ Deployment preset validation
    ✅ Comprehensive reporting testing
    ✅ Mock environment creation

EOF
}

# =============================================================================
# MOCK ENVIRONMENT SETUP
# =============================================================================

create_mock_environment() {
    test_progress "Creating mock environment for testing"

    # Create mock host configuration
    local mock_hosts_file="/tmp/thrillwiki-deploy-hosts.$$"
    echo "test-host-1" > "$mock_hosts_file"
    echo "192.168.1.100" >> "$mock_hosts_file"
    echo "demo.thrillwiki.local" >> "$mock_hosts_file"

    # Set mock environment variables
    export REMOTE_USER="testuser"
    export REMOTE_PORT="22"
    export SSH_KEY="$HOME/.ssh/id_test"
    export DEPLOYMENT_PRESET="dev"
    export GITHUB_TOKEN="mock_token_for_testing"
    export INTERACTIVE_MODE="false"

    test_success "Mock environment created successfully"
    return 0
}

cleanup_mock_environment() {
    test_debug "Cleaning up mock environment"

    # Remove mock host configuration
    if [ -f "/tmp/thrillwiki-deploy-hosts.$$" ]; then
        rm -f "/tmp/thrillwiki-deploy-hosts.$$"
    fi

    # Unset mock environment variables
    unset REMOTE_USER REMOTE_PORT SSH_KEY DEPLOYMENT_PRESET GITHUB_TOKEN INTERACTIVE_MODE

    test_success "Mock environment cleaned up"
}
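
# Note: both functions derive the temp path from $$ (the current shell's PID),
# so cleanup can reconstruct the exact file name create_mock_environment wrote
# without sharing a variable. Illustrative equivalent (not executed):
#   mock_hosts_file="/tmp/thrillwiki-deploy-hosts.$$"   # same value in both calls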

# =============================================================================
# STEP 5B VALIDATION TESTS
# =============================================================================

# Test validation functions exist and are callable
test_validation_functions() {
    test_progress "Testing validation functions"

    local validation_success=true
    local required_functions=(
        "validate_final_system"
        "validate_end_to_end_system"
        "validate_component_health"
        "validate_integration_testing"
        "validate_system_monitoring"
        "validate_cross_shell_compatibility"
        "validate_deployment_presets"
    )

    # Source the deploy-complete script to access functions
    if [ -f "$DEPLOY_COMPLETE_SCRIPT" ]; then
        # Source in a subshell so the sourced script cannot pollute this shell.
        # Overriding BASH_SOURCE first makes the sourced script's own
        # "executed directly?" check (BASH_SOURCE[0] = $0) fail, which prevents
        # its main function from running during the source. Since variable
        # assignments inside ( ... ) never reach the parent shell, the result
        # is carried back out via the subshell's exit status.
        if ! (
            BASH_SOURCE=("$DEPLOY_COMPLETE_SCRIPT" "sourced")
            source "$DEPLOY_COMPLETE_SCRIPT"

            subshell_success=true
            # Test each required function
            for func in "${required_functions[@]}"; do
                if declare -f "$func" >/dev/null 2>&1; then
                    test_success "Function '$func' exists and is callable"
                else
                    test_error "Function '$func' not found or not callable"
                    subshell_success=false
                fi
            done
            [ "$subshell_success" = true ]
        ); then
            validation_success=false
        fi
    else
        test_error "Deploy complete script not found: $DEPLOY_COMPLETE_SCRIPT"
        validation_success=false
    fi

    # Test helper functions
    local helper_functions=(
        "test_remote_thrillwiki_installation"
        "test_remote_services"
        "test_django_application"
        "check_host_configuration_health"
        "check_github_authentication_health"
        "generate_validation_report"
    )

    for func in "${helper_functions[@]}"; do
        if grep -q "^$func()" "$DEPLOY_COMPLETE_SCRIPT" 2>/dev/null; then
            test_success "Helper function '$func' exists in script"
        else
            test_warning "Helper function '$func' not found or malformed"
        fi
    done

    if [ "$validation_success" = true ]; then
        test_success "All validation functions test passed"
        return 0
    else
        test_error "Validation functions test failed"
        return 1
    fi
}
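
# Note (illustrative): 'declare -f NAME' prints NAME's function definition and
# returns non-zero when NAME is not a defined function, which is what makes it
# usable as an existence check above:
#   declare -f validate_final_system >/dev/null 2>&1 && echo "defined"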

# Test component health checks
test_component_health_checks() {
    test_progress "Testing component health checks"

    local health_check_success=true

    # Test health check functions exist
    local health_check_functions=(
        "check_host_configuration_health"
        "check_github_authentication_health"
        "check_repository_management_health"
        "check_dependency_installation_health"
        "check_django_deployment_health"
        "check_systemd_services_health"
    )

    for func in "${health_check_functions[@]}"; do
        if grep -q "^$func()" "$DEPLOY_COMPLETE_SCRIPT" 2>/dev/null; then
            test_success "Health check function '$func' exists"
        else
            test_error "Health check function '$func' not found"
            health_check_success=false
        fi
    done

    # Test health check logic patterns
    if grep -q "validate_component_health" "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "Component health validation integration found"
    else
        test_error "Component health validation integration not found"
        health_check_success=false
    fi

    if [ "$health_check_success" = true ]; then
        test_success "Component health checks test passed"
        return 0
    else
        test_error "Component health checks test failed"
        return 1
    fi
}

# Test integration testing functions
test_integration_testing() {
    test_progress "Testing integration testing functions"

    local integration_success=true

    # Test integration testing functions exist
    local integration_functions=(
        "test_complete_deployment_flow"
        "test_automated_deployment_cycle"
        "test_service_integration"
        "test_error_handling_and_recovery"
    )

    for func in "${integration_functions[@]}"; do
        if grep -q "^$func()" "$DEPLOY_COMPLETE_SCRIPT" 2>/dev/null; then
            test_success "Integration test function '$func' exists"
        else
            test_error "Integration test function '$func' not found"
            integration_success=false
        fi
    done

    # Test integration testing logic
    if grep -q "validate_integration_testing" "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "Integration testing validation found"
    else
        test_error "Integration testing validation not found"
        integration_success=false
    fi

    if [ "$integration_success" = true ]; then
        test_success "Integration testing functions test passed"
        return 0
    else
        test_error "Integration testing functions test failed"
        return 1
    fi
}

# Test system monitoring functions
test_system_monitoring() {
    test_progress "Testing system monitoring functions"

    local monitoring_success=true

    # Test monitoring functions exist
    local monitoring_functions=(
        "test_system_status_monitoring"
        "test_performance_metrics"
        "test_log_analysis"
        "test_network_connectivity_monitoring"
    )

    for func in "${monitoring_functions[@]}"; do
        if grep -q "^$func()" "$DEPLOY_COMPLETE_SCRIPT" 2>/dev/null; then
            test_success "Monitoring function '$func' exists"
        else
            test_error "Monitoring function '$func' not found"
            monitoring_success=false
        fi
    done

    # Test monitoring integration
    if grep -q "validate_system_monitoring" "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "System monitoring validation found"
    else
        test_error "System monitoring validation not found"
        monitoring_success=false
    fi

    if [ "$monitoring_success" = true ]; then
        test_success "System monitoring functions test passed"
        return 0
    else
        test_error "System monitoring functions test failed"
        return 1
    fi
}

# Test cross-shell compatibility
test_cross_shell_compatibility() {
    test_progress "Testing cross-shell compatibility"

    local shell_success=true

    # Test cross-shell compatibility functions exist
    local shell_functions=(
        "test_bash_compatibility"
        "test_zsh_compatibility"
        "test_posix_compliance"
    )

    for func in "${shell_functions[@]}"; do
        if grep -q "^$func()" "$DEPLOY_COMPLETE_SCRIPT" 2>/dev/null; then
            test_success "Shell compatibility function '$func' exists"
        else
            test_error "Shell compatibility function '$func' not found"
            shell_success=false
        fi
    done

    # Test cross-shell script detection logic
    if grep -q "BASH_SOURCE\|ZSH_NAME" "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "Cross-shell detection logic found"
    else
        test_error "Cross-shell detection logic not found"
        shell_success=false
    fi

    # Test POSIX compliance patterns: the script should use 'set -e' and avoid
    # the non-POSIX [[ ]] test construct. '[[' must be escaped in the grep
    # pattern, since a bare '[' opens a bracket expression.
    if grep -q "set -e" "$DEPLOY_COMPLETE_SCRIPT" && ! grep -q '\[\[' "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "POSIX compliance patterns found"
    else
        test_warning "POSIX compliance could be improved"
    fi

    if [ "$shell_success" = true ]; then
        test_success "Cross-shell compatibility test passed"
        return 0
    else
        test_error "Cross-shell compatibility test failed"
        return 1
    fi
}
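
# Note (illustrative): a fixed-string match avoids the regex escaping entirely:
#   grep -qF '[[' "$DEPLOY_COMPLETE_SCRIPT"
# -F treats the pattern as a literal string instead of a regular expression.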

# Test deployment preset validation
test_deployment_presets() {
    test_progress "Testing deployment preset validation"

    local preset_success=true

    # Test preset validation functions exist
    if grep -q "test_deployment_preset" "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "Deployment preset test function exists"
    else
        test_error "Deployment preset test function not found"
        preset_success=false
    fi

    # Test preset configuration functions
    if grep -q "validate_preset\|get_preset_config" "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "Preset configuration functions found"
    else
        test_error "Preset configuration functions not found"
        preset_success=false
    fi

    # Test all required presets are supported. An array is used instead of a
    # space-separated string because zsh does not word-split unquoted
    # parameter expansions by default.
    local required_presets=(dev prod demo testing)
    for preset in "${required_presets[@]}"; do
        if grep -q "\"$preset\"" "$DEPLOY_COMPLETE_SCRIPT"; then
            test_success "Preset '$preset' configuration found"
        else
            test_error "Preset '$preset' configuration not found"
            preset_success=false
        fi
    done

    if [ "$preset_success" = true ]; then
        test_success "Deployment preset validation test passed"
        return 0
    else
        test_error "Deployment preset validation test failed"
        return 1
    fi
}

# Test comprehensive reporting
test_comprehensive_reporting() {
    test_progress "Testing comprehensive reporting"

    local reporting_success=true

    # Test reporting functions exist
    if grep -q "generate_validation_report" "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "Validation report generation function exists"
    else
        test_error "Validation report generation function not found"
        reporting_success=false
    fi

    # Test report content patterns
    local report_patterns=(
        "validation_results"
        "total_tests"
        "passed_tests"
        "failed_tests"
        "warning_tests"
        "overall_status"
    )

    for pattern in "${report_patterns[@]}"; do
        if grep -q "$pattern" "$DEPLOY_COMPLETE_SCRIPT"; then
            test_success "Report pattern '$pattern' found"
        else
            test_error "Report pattern '$pattern' not found"
            reporting_success=false
        fi
    done

    # Test report file generation
    if grep -q "final-validation-report.txt" "$DEPLOY_COMPLETE_SCRIPT"; then
        test_success "Report file generation pattern found"
    else
        test_error "Report file generation pattern not found"
        reporting_success=false
    fi

    if [ "$reporting_success" = true ]; then
        test_success "Comprehensive reporting test passed"
        return 0
    else
        test_error "Comprehensive reporting test failed"
        return 1
    fi
}

# Test Step 5B integration in main deployment flow
test_step5b_integration() {
    test_progress "Testing Step 5B integration in main deployment flow"

    local integration_success=true

    # Test Step 5B is called in main function
    if grep -q "validate_final_system" "$DEPLOY_COMPLETE_SCRIPT" && grep -A5 -B5 "validate_final_system" "$DEPLOY_COMPLETE_SCRIPT" | grep -q "Step 5B"; then
        test_success "Step 5B integration found in main deployment flow"
    else
        test_error "Step 5B integration not found in main deployment flow"
        integration_success=false
    fi

    # Test proper error handling for validation failures
    if grep -A10 "validate_final_system" "$DEPLOY_COMPLETE_SCRIPT" | grep -q "FORCE_DEPLOY"; then
        test_success "Validation failure handling with force deploy option found"
    else
        test_warning "Validation failure handling could be improved"
    fi

    # Test validation is called at the right time (after deployment)
    if grep -B20 "validate_final_system" "$DEPLOY_COMPLETE_SCRIPT" | grep -q "setup_smart_automated_deployment"; then
        test_success "Step 5B is properly positioned after deployment steps"
    else
        test_warning "Step 5B positioning in deployment flow could be improved"
    fi

    if [ "$integration_success" = true ]; then
        test_success "Step 5B integration test passed"
        return 0
    else
        test_error "Step 5B integration test failed"
        return 1
    fi
}
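
# Note: the -A/-B context checks above are textual heuristics; for example,
#   grep -B20 "validate_final_system" ... | grep -q "setup_smart_automated_deployment"
# treats "appears within the 20 lines before" as a proxy for "runs earlier in
# the deployment flow".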

# =============================================================================
# MAIN TEST EXECUTION
# =============================================================================

# Run all Step 5B tests
run_all_tests() {
    test_progress "Running comprehensive Step 5B final validation tests"

    local start_time
    start_time=$(date +%s)

    local total_tests=0
    local passed_tests=0
    local failed_tests=0
    local test_results=""

    # Create mock environment for testing
    create_mock_environment

    # Table-driven test execution: each entry is "<result label>:<test function>"
    local test_cases=(
        "Validation functions:test_validation_functions"
        "Component health checks:test_component_health_checks"
        "Integration testing:test_integration_testing"
        "System monitoring:test_system_monitoring"
        "Cross-shell compatibility:test_cross_shell_compatibility"
        "Deployment presets:test_deployment_presets"
        "Comprehensive reporting:test_comprehensive_reporting"
        "Step 5B integration:test_step5b_integration"
    )

    local entry label func
    for entry in "${test_cases[@]}"; do
        label="${entry%%:*}"
        func="${entry#*:}"
        total_tests=$((total_tests + 1))
        if "$func"; then
            test_results="${test_results}✅ ${label} test: PASS\n"
            passed_tests=$((passed_tests + 1))
        else
            test_results="${test_results}❌ ${label} test: FAIL\n"
            failed_tests=$((failed_tests + 1))
        fi
    done

    # Calculate test duration
    local end_time
    end_time=$(date +%s)
    local test_duration=$((end_time - start_time))

    # Generate test report
    generate_test_report "$test_results" "$total_tests" "$passed_tests" "$failed_tests" "$test_duration"

    # Cleanup mock environment
    cleanup_mock_environment

    # Determine overall test result
    if [ "$failed_tests" -eq 0 ]; then
        test_success "All Step 5B tests passed! ($passed_tests/$total_tests)"
        return 0
    else
        test_error "Step 5B tests failed: $failed_tests/$total_tests tests failed"
        return 1
    fi
}

# Generate test report
generate_test_report() {
    local test_results="$1"
    local total_tests="$2"
    local passed_tests="$3"
    local failed_tests="$4"
    local test_duration="$5"

    mkdir -p "$(dirname "$TEST_RESULTS_FILE")"

    {
        echo "ThrillWiki Step 5B Final Validation Test Report"
        echo "==============================================="
        echo ""
        echo "Generated: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "Test Duration: ${test_duration} seconds"
        echo "Test Script: $0"
        echo ""
        echo "Test Results Summary:"
        echo "===================="
        echo "Total tests: $total_tests"
        echo "Passed: $passed_tests"
        echo "Failed: $failed_tests"
        # total_tests is always non-zero when called from run_all_tests
        echo "Success rate: $(( (passed_tests * 100) / total_tests ))%"
        echo ""
        echo "Detailed Results:"
        echo "================"
        echo -e "$test_results"
        echo ""
        echo "Environment Information:"
        echo "======================="
        echo "Operating System: $(uname -s)"
        echo "Architecture: $(uname -m)"
        echo "Shell: ${SHELL:-unknown}"
        echo "User: $(whoami)"
        echo "Working Directory: $(pwd)"
        echo "Project Directory: $PROJECT_DIR"
        echo ""
    } > "$TEST_RESULTS_FILE"

    test_success "Test report saved to: $TEST_RESULTS_FILE"
}

# =============================================================================
# ARGUMENT PARSING AND MAIN EXECUTION
# =============================================================================

# Parse command line arguments
parse_arguments() {
    local test_validation_functions=false
    local test_health_checks=false
    local test_integration=false
    local test_monitoring=false
    local test_cross_shell=false
    local test_presets=false
    local test_reporting=false
    local test_all=true
    local create_mock_hosts=false
    local quiet=false

    while [[ $# -gt 0 ]]; do
        case $1 in
            --test-validation-functions)
                test_validation_functions=true
                test_all=false
                shift
                ;;
            --test-health-checks)
                test_health_checks=true
                test_all=false
                shift
                ;;
            --test-integration)
                test_integration=true
                test_all=false
                shift
                ;;
            --test-monitoring)
                test_monitoring=true
                test_all=false
                shift
                ;;
            --test-cross-shell)
                test_cross_shell=true
                test_all=false
                shift
                ;;
            --test-presets)
                test_presets=true
                test_all=false
                shift
                ;;
            --test-reporting)
                test_reporting=true
                test_all=false
                shift
                ;;
            --test-all)
                test_all=true
                shift
                ;;
            --create-mock-hosts)
                create_mock_hosts=true
                shift
                ;;
            --debug)
                export TEST_DEBUG=true
                shift
                ;;
            --quiet)
                quiet=true
                shift
                ;;
            -h|--help)
                show_usage
                exit 0
                ;;
            *)
                test_error "Unknown option: $1"
                echo "Use --help for usage information"
                exit 1
                ;;
        esac
    done

    # Execute requested tests
    if [ "$test_all" = true ]; then
        run_all_tests
    else
        # Run individual tests as requested
        if [ "$create_mock_hosts" = true ]; then
            create_mock_environment
        fi

        local any_test_run=false

        if [ "$test_validation_functions" = true ]; then
            test_validation_functions
            any_test_run=true
        fi

        if [ "$test_health_checks" = true ]; then
            test_component_health_checks
            any_test_run=true
        fi

        if [ "$test_integration" = true ]; then
            test_integration_testing
            any_test_run=true
        fi

        if [ "$test_monitoring" = true ]; then
            test_system_monitoring
            any_test_run=true
        fi

        if [ "$test_cross_shell" = true ]; then
            test_cross_shell_compatibility
            any_test_run=true
        fi

        if [ "$test_presets" = true ]; then
            test_deployment_presets
            any_test_run=true
        fi

        if [ "$test_reporting" = true ]; then
            test_comprehensive_reporting
            any_test_run=true
        fi

        if [ "$any_test_run" = false ]; then
            test_warning "No specific tests requested, running all tests"
            run_all_tests
        fi

        if [ "$create_mock_hosts" = true ]; then
            cleanup_mock_environment
        fi
    fi
}

# Main function
main() {
    if [ "${1:-}" != "--quiet" ]; then
        show_test_banner
    fi

    test_info "Starting ThrillWiki Step 5B Final Validation Test"
    test_info "Project Directory: $PROJECT_DIR"
    test_info "Deploy Complete Script: $DEPLOY_COMPLETE_SCRIPT"

    # Validate prerequisites
    if [ ! -f "$DEPLOY_COMPLETE_SCRIPT" ]; then
        test_error "Deploy complete script not found: $DEPLOY_COMPLETE_SCRIPT"
        exit 1
    fi

    # Parse arguments and run tests
    parse_arguments "$@"
}

# Cross-shell compatible script execution check
if [ -n "${BASH_SOURCE:-}" ]; then
    # In bash, run main only when the script is executed directly
    if [ "${BASH_SOURCE[0]}" = "${0}" ]; then
        main "$@"
    fi
elif [ -n "${ZSH_NAME:-}" ]; then
    # In zsh, run main only when the script is executed directly
    if [ "${(%):-%x}" = "${0}" ]; then
        main "$@"
    fi
else
    # In other shells, assume direct execution
    main "$@"
fi
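
# Example (illustrative): because of the execution guard above, this file can
# be sourced to call individual test functions interactively without running main:
#   source ./test-step5b-final-validation.sh
#   test_cross_shell_compatibility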