Files
maternal-app/maternal-app/maternal-app-backend/TESTING.md
2025-10-01 19:01:52 +00:00

13 KiB

Backend Testing Guide

Comprehensive testing documentation for the Maternal App Backend (NestJS).

Table of Contents

  • Overview
  • Testing Stack
  • Test Structure
  • Running Tests
  • Writing Tests
  • Coverage Goals
  • Performance Testing
  • CI/CD Integration
  • Best Practices
  • Troubleshooting
  • Resources

Overview

The backend testing suite includes:

  • Unit Tests: Testing individual services, controllers, and utilities
  • Integration Tests: Testing database interactions and module integration
  • E2E Tests: Testing complete API workflows with real HTTP requests
  • Performance Tests: Load testing with Artillery

Testing Stack

  • Jest: Testing framework
  • Supertest: HTTP assertions for E2E tests
  • NestJS Testing Module: Dependency injection for unit tests
  • Artillery: Performance and load testing
  • PostgreSQL/Redis/MongoDB: Test database services

Test Structure

maternal-app-backend/
├── src/
│   ├── modules/
│   │   ├── auth/
│   │   │   ├── auth.service.spec.ts       # Unit tests
│   │   │   ├── auth.controller.spec.ts
│   │   │   └── ...
│   │   ├── tracking/
│   │   │   ├── tracking.service.spec.ts
│   │   │   └── ...
│   │   └── ...
│   └── ...
├── test/
│   ├── app.e2e-spec.ts                    # E2E tests
│   ├── auth.e2e-spec.ts
│   ├── tracking.e2e-spec.ts
│   ├── children.e2e-spec.ts
│   └── jest-e2e.json                       # E2E Jest config
├── artillery.yml                           # Performance test scenarios
└── TESTING.md                              # This file

Running Tests

Unit Tests

# Run all unit tests
npm test

# Run tests in watch mode (for development)
npm run test:watch

# Run tests with coverage report
npm run test:cov

# Run tests in debug mode
npm run test:debug

Integration/E2E Tests

# Run all E2E tests
npm run test:e2e

# Requires PostgreSQL, Redis, and MongoDB to be running
# Use Docker Compose for test dependencies:
docker-compose -f docker-compose.test.yml up -d

Performance Tests

# Install Artillery globally
npm install -g artillery@latest

# Start the application
npm run start:prod

# Run performance tests
artillery run artillery.yml

# Generate detailed report
artillery run artillery.yml --output report.json
artillery report report.json

Quick Test Commands

# Run specific test file
npm test -- auth.service.spec.ts

# Run tests matching pattern
npm test -- --testNamePattern="should create user"

# Update snapshots
npm test -- -u

# Run with verbose output
npm test -- --verbose

Writing Tests

Unit Test Example

import { Test, TestingModule } from '@nestjs/testing';
import { getRepositoryToken } from '@nestjs/typeorm';
import { Repository } from 'typeorm';
import { MyService } from './my.service';
import { MyEntity } from './entities/my.entity';

describe('MyService', () => {
  let service: MyService;
  let repository: Repository<MyEntity>;

  const mockRepository = {
    find: jest.fn(),
    findOne: jest.fn(),
    save: jest.fn(),
    create: jest.fn(),
    delete: jest.fn(),
  };

  beforeEach(async () => {
    const module: TestingModule = await Test.createTestingModule({
      providers: [
        MyService,
        {
          provide: getRepositoryToken(MyEntity),
          useValue: mockRepository,
        },
      ],
    }).compile();

    service = module.get<MyService>(MyService);
    repository = module.get<Repository<MyEntity>>(
      getRepositoryToken(MyEntity),
    );
  });

  afterEach(() => {
    jest.clearAllMocks();
  });

  describe('findAll', () => {
    it('should return an array of entities', async () => {
      const expected = [{ id: '1', name: 'Test' }];
      jest.spyOn(repository, 'find').mockResolvedValue(expected as any);

      const result = await service.findAll();

      expect(result).toEqual(expected);
      expect(repository.find).toHaveBeenCalled();
    });
  });

  describe('create', () => {
    it('should create and return a new entity', async () => {
      const dto = { name: 'New Entity' };
      const created = { id: '1', ...dto };

      jest.spyOn(repository, 'create').mockReturnValue(created as any);
      jest.spyOn(repository, 'save').mockResolvedValue(created as any);

      const result = await service.create(dto);

      expect(result).toEqual(created);
      expect(repository.create).toHaveBeenCalledWith(dto);
      expect(repository.save).toHaveBeenCalledWith(created);
    });
  });

  describe('error handling', () => {
    it('should throw NotFoundException when entity not found', async () => {
      jest.spyOn(repository, 'findOne').mockResolvedValue(null);

      await expect(service.findOne('invalid-id')).rejects.toThrow(
        NotFoundException,
      );
    });
  });
});

E2E Test Example

import { Test, TestingModule } from '@nestjs/testing';
import { INestApplication, ValidationPipe } from '@nestjs/common';
import { DataSource } from 'typeorm';
import * as request from 'supertest';
import { AppModule } from '../src/app.module';

describe('MyController (e2e)', () => {
  let app: INestApplication;
  let dataSource: DataSource;
  let accessToken: string;

  beforeAll(async () => {
    // Boot the entire application module so the test exercises real wiring.
    const testModule: TestingModule = await Test.createTestingModule({
      imports: [AppModule],
    }).compile();

    app = testModule.createNestApplication();

    // Mirror the global validation configured in main.ts so request
    // validation behaves exactly as in production.
    app.useGlobalPipes(
      new ValidationPipe({
        whitelist: true,
        forbidNonWhitelisted: true,
        transform: true,
      }),
    );

    await app.init();
    dataSource = app.get(DataSource);

    // Register a throwaway user once; its token authenticates every
    // request in the suite below.
    const registerResponse = await request(app.getHttpServer())
      .post('/api/v1/auth/register')
      .send({
        email: 'test@example.com',
        password: 'TestPassword123!',
        name: 'Test User',
      });

    accessToken = registerResponse.body.data.tokens.accessToken;
  });

  afterAll(async () => {
    // Remove the user created in beforeAll, then release all resources.
    await dataSource.query('DELETE FROM users WHERE email = $1', [
      'test@example.com',
    ]);
    await app.close();
  });

  describe('POST /api/v1/resource', () => {
    it('should create a resource', async () => {
      const res = await request(app.getHttpServer())
        .post('/api/v1/resource')
        .set('Authorization', `Bearer ${accessToken}`)
        .send({ name: 'Test Resource' })
        .expect(201);

      expect(res.body.data).toHaveProperty('id');
      expect(res.body.data.name).toBe('Test Resource');
    });

    it('should return 401 without authentication', async () => {
      // No Authorization header: the guard must reject the request.
      await request(app.getHttpServer())
        .post('/api/v1/resource')
        .send({ name: 'Test Resource' })
        .expect(401);
    });

    it('should validate request body', async () => {
      // forbidNonWhitelisted rejects unknown properties with a 400.
      await request(app.getHttpServer())
        .post('/api/v1/resource')
        .set('Authorization', `Bearer ${accessToken}`)
        .send({ invalid: 'field' })
        .expect(400);
    });
  });
});

Coverage Goals

Target Coverage

Following the testing strategy document:

  • Overall: 80% line coverage
  • Critical modules (auth, tracking, families): 90%+ coverage
  • Services: 85%+ coverage
  • Controllers: 70%+ coverage

Current Coverage (as of Phase 6)

Overall Coverage: 27.93%

By Module:
- AI Service: 97% ✅
- Auth Service: 86% ✅
- Tracking Service: 88% ✅
- Children Service: 91% ✅
- Families Service: 59% ⚠️
- Analytics Services: 0% ❌
- Voice Service: 0% ❌
- Controllers: 0% ❌

Checking Coverage

# Generate HTML coverage report
npm run test:cov

# View report in browser
open coverage/lcov-report/index.html

# Check specific file coverage
npm run test:cov -- --collectCoverageFrom="src/modules/tracking/**/*.ts"

Performance Testing

Artillery Test Scenarios

The artillery.yml file defines 5 realistic scenarios:

  1. User Registration and Login (10% of traffic)
  2. Track Baby Activities (50% - most common operation)
  3. View Analytics Dashboard (20% - read-heavy)
  4. AI Chat Interaction (15%)
  5. Family Collaboration (5%)

Load Testing Phases

  1. Warm-up: 5 users/sec for 60s
  2. Ramp-up: 5→50 users/sec over 120s
  3. Sustained: 50 users/sec for 300s
  4. Spike: 100 users/sec for 60s

Performance Thresholds

  • Error Rate: < 1%
  • P95 Response Time: < 2 seconds
  • P99 Response Time: < 3 seconds

Running Performance Tests

# Quick smoke test
artillery quick --count 10 --num 100 http://localhost:3000/api/v1/health

# Full test suite
artillery run artillery.yml

# With custom variables
artillery run artillery.yml --variables '{"testEmail": "custom@test.com"}'

# Generate and view report
artillery run artillery.yml -o report.json
artillery report report.json -o report.html
open report.html

CI/CD Integration

Tests run automatically on every push and pull request via GitHub Actions.

Workflow: .github/workflows/backend-ci.yml

Jobs:

  1. lint-and-test: ESLint + Jest unit tests with coverage
  2. e2e-tests: Full E2E test suite with database services
  3. build: NestJS production build
  4. performance-test: Artillery load testing (PRs only)

Services:

  • PostgreSQL 15
  • Redis 7
  • MongoDB 7

Local CI Simulation

# Run the same checks as CI
npm run lint
npm run test:cov
npm run test:e2e
npm run build

Best Practices

General Guidelines

  1. Test Behavior, Not Implementation

    • Focus on what the code does, not how it does it
    • Avoid testing private methods directly
  2. Use Descriptive Test Names

    // ✅ Good
    it('should throw ForbiddenException when user lacks invite permissions', () => {})
    
    // ❌ Bad
    it('test invite', () => {})
    
  3. Follow AAA Pattern

    • Arrange: Set up test data and mocks
    • Act: Execute the code under test
    • Assert: Verify the results
  4. One Assertion Per Test (when possible)

    • Makes failures easier to diagnose
    • Each test has a clear purpose
  5. Isolate Tests

    • Tests should not depend on each other
    • Use beforeEach/afterEach for setup/cleanup

Mocking Guidelines

// ✅ Mock external dependencies
jest.spyOn(repository, 'findOne').mockResolvedValue(mockData);

// ✅ Mock HTTP calls
jest.spyOn(httpService, 'post').mockImplementation(() => of(mockResponse));

// ✅ Mock date/time for consistency
jest.useFakeTimers().setSystemTime(new Date('2024-01-01'));

// ❌ Don't mock what you're testing
// If testing AuthService, don't mock AuthService methods

E2E Test Best Practices

  1. Database Cleanup: Always clean up test data in afterAll
  2. Real Configuration: Use environment similar to production
  3. Meaningful Assertions: Check response structure and content
  4. Error Cases: Test both success and failure scenarios

Performance Test Best Practices

  1. Realistic Data: Use production-like data volumes
  2. Gradual Ramp-up: Don't spike from 0→1000 instantly
  3. Monitor Resources: Track CPU, memory, database connections
  4. Test Edge Cases: Include long-running operations, large payloads

Troubleshooting

Common Issues

Tests timing out:

// Increase timeout for specific test
it('slow operation', async () => {}, 10000); // 10 seconds

// Or globally in jest.config.js
testTimeout: 10000

Database connection errors in E2E tests:

# Ensure test database is running
docker-compose -f docker-compose.test.yml up -d postgres

# Check connection
psql -h localhost -U testuser -d maternal_test

Module not found errors:

// Check jest.config.js moduleNameMapper
{
  "moduleNameMapper": {
    "^src/(.*)$": "<rootDir>/src/$1"
  }
}

Flaky tests:

  • Wait on explicit conditions (e.g. poll until a record exists) instead of fixed sleep timeouts
  • Use waitFor utilities for async operations
  • Check for race conditions in parallel tests

Resources

Coverage Reports

Coverage reports are uploaded to Codecov on every CI run:

  • Frontend: codecov.io/gh/your-org/maternal-app/flags/frontend
  • Backend: codecov.io/gh/your-org/maternal-app/flags/backend

Continuous Improvement

  • Weekly: Review coverage reports and identify gaps
  • Monthly: Analyze performance test trends
  • Per Sprint: Add tests for new features before merging
  • Quarterly: Update test data and scenarios to match production usage