# Testing Documentation

## Testing Architecture

### Test Organization

```
tests/
├── unit/
│   ├── test_models.py
│   ├── test_views.py
│   └── test_forms.py
├── integration/
│   ├── test_workflows.py
│   └── test_apis.py
└── e2e/
    └── test_user_journeys.py
```

### Test Configuration

```python
# pytest configuration
pytest_plugins = [
    "tests.fixtures.parks",
    "tests.fixtures.users",
    "tests.fixtures.media",
]

# Test settings
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TEST_MODE = True
```

## Test Types

### Unit Tests

#### Model Tests

```python
class ParkModelTest(TestCase):
    def setUp(self):
        self.park = Park.objects.create(
            name="Test Park",
            status="OPERATING"
        )

    def test_slug_generation(self):
        self.assertEqual(self.park.slug, "test-park")

    def test_status_validation(self):
        with self.assertRaises(ValidationError):
            Park.objects.create(
                name="Invalid Park",
                status="INVALID"
            )
```

#### View Tests

```python
class ParkViewTest(TestCase):
    def setUp(self):
        self.client = Client()
        self.user = User.objects.create_user(
            username="testuser",
            password="[PASSWORD-REMOVED]"
        )

    def test_park_list_view(self):
        response = self.client.get(reverse('parks:list'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'parks/park_list.html')
```

#### Form Tests

```python
class RideFormTest(TestCase):
    def test_valid_form(self):
        form = RideForm({
            'name': 'Test Ride',
            'status': 'OPERATING',
            'height_requirement': 48,
        })
        self.assertTrue(form.is_valid())
```

### Integration Tests

#### Workflow Tests

```python
class ReviewWorkflowTest(TestCase):
    def test_review_moderation_flow(self):
        # Create review
        review = self.create_review()

        # Submit for moderation
        response = self.client.post(
            reverse('reviews:submit_moderation', kwargs={'pk': review.pk})
        )
        review.refresh_from_db()
        self.assertEqual(review.status, 'PENDING')

        # Approve review
        moderator = self.create_moderator()
        self.client.force_login(moderator)
        response = self.client.post(
            reverse('reviews:approve', kwargs={'pk': review.pk})
        )
        review.refresh_from_db()
        self.assertEqual(review.status, 'APPROVED')
```

#### API Tests

```python
class ParkAPITest(APITestCase):
    def test_park_list_api(self):
        url = reverse('api:park-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_park_create_api(self):
        url = reverse('api:park-create')
        data = {
            'name': 'New Park',
            'status': 'OPERATING',
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, 201)
```

### End-to-End Tests

#### User Journey Tests

```python
class UserJourneyTest(LiveServerTestCase):
    def test_park_review_journey(self):
        # User logs in
        self.login_user()

        # Navigate to park
        self.browser.get(f'{self.live_server_url}/parks/test-park/')

        # Create review
        self.browser.find_element_by_id('write-review').click()
        self.browser.find_element_by_id('review-text').send_keys('Great park!')
        self.browser.find_element_by_id('submit').click()

        # Verify review appears
        review_element = self.browser.find_element_by_class_name('review-item')
        self.assertIn('Great park!', review_element.text)
```
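The journey test above relies on a `self.browser` handle and a `login_user()` helper that are not shown. A minimal sketch of that fixture code, assuming Selenium with headless Chrome and a conventional Django login form (the element IDs, the `/accounts/login/` URL, and the `BrowserTestCase` name are illustrative, not part of the existing suite):

```python
# Minimal sketch of the browser fixture assumed by UserJourneyTest.
# Assumes Selenium (pre-4.3 locator style, matching the test above) and
# headless Chrome; adjust IDs/URLs to the project's real templates.
from django.contrib.auth import get_user_model
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver


class BrowserTestCase(StaticLiveServerTestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        cls.browser = webdriver.Chrome(options=options)
        cls.browser.implicitly_wait(5)

    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
        super().tearDownClass()

    def login_user(self):
        # Create a throwaway user and sign in through the login form.
        get_user_model().objects.create_user(
            username="journeyuser", password="journey-pass"
        )
        self.browser.get(f"{self.live_server_url}/accounts/login/")
        self.browser.find_element_by_id("id_username").send_keys("journeyuser")
        self.browser.find_element_by_id("id_password").send_keys("journey-pass")
        self.browser.find_element_by_id("id_password").submit()
```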
## CI/CD Pipeline

### GitHub Actions Configuration

```yaml
name: ThrillWiki CI

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main, develop ]

jobs:
  test:
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:13
        env:
          POSTGRES_PASSWORD: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432

    steps:
      - uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.11'

      - name: Install Dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run Tests
        env:
          DATABASE_URL: postgres://postgres:postgres@localhost:5432/thrillwiki_test
        run: |
          pytest --cov=./ --cov-report=xml

      - name: Upload Coverage
        uses: codecov/codecov-action@v1
```

## Quality Metrics

### Code Coverage

```ini
# Coverage configuration (setup.cfg)
[coverage:run]
source = .
omit =
    */migrations/*
    */tests/*
    manage.py

[coverage:report]
exclude_lines =
    pragma: no cover
    def __str__
    raise NotImplementedError
```

### Code Quality Tools

```ini
# flake8 configuration (setup.cfg)
[flake8]
max-line-length = 88
extend-ignore = E203
exclude = .git,__pycache__,build,dist
```

```toml
# black configuration (pyproject.toml)
[tool.black]
line-length = 88
target-version = ['py311']
include = '\.pyi?$'
```

## Test Data Management

### Fixtures

`fixtures/parks.json`:

```json
[
  {
    "model": "parks.park",
    "pk": 1,
    "fields": {
      "name": "Test Park",
      "slug": "test-park",
      "status": "OPERATING"
    }
  }
]
```

### Factory Classes

```python
import factory
from factory.django import DjangoModelFactory


class ParkFactory(DjangoModelFactory):
    class Meta:
        model = Park

    name = factory.Sequence(lambda n: f'Test Park {n}')
    status = 'OPERATING'
```

## Performance Testing

### Load Testing

```python
from locust import HttpUser, task, between


class ParkUser(HttpUser):
    wait_time = between(1, 3)

    @task
    def view_park_list(self):
        self.client.get("/parks/")

    @task
    def view_park_detail(self):
        self.client.get("/parks/test-park/")
```

### Benchmark Tests

```python
import time


class ParkBenchmarkTest(TestCase):
    def test_park_list_performance(self):
        start_time = time.time()
        # Force evaluation; without list() the lazy queryset never hits the database.
        list(Park.objects.all().select_related('owner'))
        end_time = time.time()
        self.assertLess(end_time - start_time, 0.1)
```

## Test Automation

### Test Runner Configuration

```python
# Custom test runner
class CustomTestRunner(DiscoverRunner):
    def setup_databases(self, **kwargs):
        # Custom database setup
        return super().setup_databases(**kwargs)

    def teardown_databases(self, old_config, **kwargs):
        # Custom cleanup
        return super().teardown_databases(old_config, **kwargs)
```

### Automated Test Execution

```bash
#!/bin/bash
# Test execution script

# Run unit tests
pytest tests/unit/

# Run integration tests
pytest tests/integration/

# Run e2e tests
pytest tests/e2e/

# Generate coverage report
coverage run -m pytest
coverage report
coverage html
```

## Monitoring and Reporting

### Test Reports

```python
# pytest-html configuration (conftest.py)
def pytest_html_report_title(report):
    # Sets the title shown at the top of the generated HTML report.
    report.title = "ThrillWiki Test Report"
    # A description such as "Test Results for ThrillWiki" can be added to the
    # report summary via the pytest_html_results_summary hook.
```

### Coverage Reports

```python
# Coverage reporting configuration
COVERAGE_REPORT_OPTIONS = {
    'report_type': 'html',
    'directory': 'coverage_html',
    'title': 'ThrillWiki Coverage Report',
    'show_contexts': True,
}
```
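`COVERAGE_REPORT_OPTIONS` is a project-level convention rather than something coverage.py reads directly, so a small helper has to translate it into an API call. A minimal sketch, assuming the coverage.py Python API (`Coverage.html_report` accepts `directory`, `title`, and `show_contexts` in coverage 5+); the `generate_coverage_report()` helper is hypothetical, not part of the existing codebase:

```python
# Hypothetical helper that feeds COVERAGE_REPORT_OPTIONS into coverage.py.
# It assumes a .coverage data file already produced by `coverage run -m pytest`.
import coverage

# Repeated here so the sketch is self-contained.
COVERAGE_REPORT_OPTIONS = {
    'report_type': 'html',
    'directory': 'coverage_html',
    'title': 'ThrillWiki Coverage Report',
    'show_contexts': True,
}


def generate_coverage_report(options=COVERAGE_REPORT_OPTIONS):
    cov = coverage.Coverage()
    cov.load()  # read the .coverage data file written by the test run
    if options['report_type'] == 'html':
        cov.html_report(
            directory=options['directory'],
            title=options['title'],
            show_contexts=options['show_contexts'],
        )
    else:
        cov.report()  # fall back to a plain terminal summary


if __name__ == "__main__":
    generate_coverage_report()
```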