Add pytest tests to auth

Urtzi Alfaro
2025-07-20 13:48:26 +02:00
parent 608585c72c
commit 351f673318
6 changed files with 2698 additions and 191 deletions

View File

@@ -1,29 +1,31 @@
# ================================================================
# services/auth/tests/conftest.py
# Pytest configuration and shared fixtures for auth service tests
# ================================================================
"""Test configuration for auth service"""
"""
Shared test configuration and fixtures for authentication service tests
"""
import pytest
import asyncio
import os
import sys
from typing import AsyncGenerator
from unittest.mock import AsyncMock, MagicMock, patch
from fastapi.testclient import TestClient
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
import redis.asyncio as redis
# Add the app directory to the Python path for imports
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# ================================================================
# TEST DATABASE CONFIGURATION
# ================================================================
# Use in-memory SQLite for fast testing
TEST_DATABASE_URL = "sqlite+aiosqlite:///:memory:"
@pytest.fixture(scope="session")
def event_loop():
@@ -32,38 +34,495 @@ def event_loop():
yield loop
loop.close()
@pytest.fixture(scope="function")
async def test_engine():
"""Create a test database engine for each test function"""
engine = create_async_engine(
TEST_DATABASE_URL,
echo=False,  # Set to True for SQL debugging
future=True,
pool_pre_ping=True
)
try:
# Import models and base here to avoid import issues
from shared.database.base import Base
# Create all tables
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all)
yield engine
# Cleanup
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
except ImportError:
# If shared.database.base is not available, yield the engine without a schema
yield engine
finally:
await engine.dispose()
@pytest.fixture(scope="function")
async def test_db(test_engine) -> AsyncGenerator[AsyncSession, None]:
"""Create a test database session for each test function"""
async_session = sessionmaker(
test_engine,
class_=AsyncSession,
expire_on_commit=False
)
async with async_session() as session:
yield session
@pytest.fixture(scope="function")
def client(test_db):
"""Create a test client with database dependency override"""
try:
from app.main import app
from app.core.database import get_db
def override_get_db():
return test_db
app.dependency_overrides[get_db] = override_get_db
with TestClient(app) as test_client:
yield test_client
# Clean up overrides
app.dependency_overrides.clear()
except ImportError as e:
pytest.skip(f"Cannot import app modules: {e}")
# ================================================================
# MOCK FIXTURES
# ================================================================
@pytest.fixture
def mock_redis():
"""Mock Redis client for testing rate limiting and session management"""
redis_mock = AsyncMock()
# Default return values for common operations
redis_mock.get.return_value = None
redis_mock.incr.return_value = 1
redis_mock.expire.return_value = True
redis_mock.delete.return_value = True
redis_mock.setex.return_value = True
redis_mock.exists.return_value = False
return redis_mock
@pytest.fixture
def mock_rabbitmq():
"""Mock RabbitMQ for testing event publishing"""
rabbitmq_mock = AsyncMock()
# Mock publisher methods
rabbitmq_mock.publish.return_value = True
rabbitmq_mock.connect.return_value = True
rabbitmq_mock.disconnect.return_value = True
return rabbitmq_mock
@pytest.fixture
def mock_external_services():
"""Mock external service calls (tenant service, etc.)"""
with patch('httpx.AsyncClient') as mock_client:
mock_response = AsyncMock()
mock_response.status_code = 200
mock_response.json.return_value = {"tenants": []}
mock_client.return_value.__aenter__.return_value.get.return_value = mock_response
mock_client.return_value.__aenter__.return_value.post.return_value = mock_response
yield mock_client
# ================================================================
# DATA FIXTURES
# ================================================================
@pytest.fixture
def valid_user_data():
"""Valid user registration data"""
return {
"email": "test@bakery.es",
"password": "TestPass123",
"full_name": "Test User",
"phone": "+34123456789",
"language": "es"
"password": "TestPassword123",
"full_name": "Test User"
}
@pytest.fixture
def valid_user_data_list():
"""List of valid user data for multiple users"""
return [
{
"email": f"test{i}@bakery.es",
"password": "TestPassword123",
"full_name": f"Test User {i}"
}
for i in range(1, 6)
]
@pytest.fixture
def weak_password_data():
"""User data with various weak passwords"""
return [
{"email": "weak1@bakery.es", "password": "123", "full_name": "Weak 1"},
{"email": "weak2@bakery.es", "password": "password", "full_name": "Weak 2"},
{"email": "weak3@bakery.es", "password": "PASSWORD123", "full_name": "Weak 3"},
{"email": "weak4@bakery.es", "password": "testpassword", "full_name": "Weak 4"},
]
@pytest.fixture
def invalid_email_data():
"""User data with invalid email formats"""
return [
{"email": "invalid", "password": "TestPassword123", "full_name": "Invalid 1"},
{"email": "@bakery.es", "password": "TestPassword123", "full_name": "Invalid 2"},
{"email": "test@", "password": "TestPassword123", "full_name": "Invalid 3"},
{"email": "test..test@bakery.es", "password": "TestPassword123", "full_name": "Invalid 4"},
]
# ================================================================
# USER FIXTURES
# ================================================================
@pytest.fixture
async def test_user(test_db, valid_user_data):
"""Create a test user in the database"""
try:
from app.services.auth_service import AuthService
user = await AuthService.create_user(
email=valid_user_data["email"],
password=valid_user_data["password"],
full_name=valid_user_data["full_name"],
db=test_db
)
return user
except ImportError:
pytest.skip("AuthService not available")
@pytest.fixture
async def test_users(test_db, valid_user_data_list):
"""Create multiple test users in the database"""
try:
from app.services.auth_service import AuthService
users = []
for user_data in valid_user_data_list:
user = await AuthService.create_user(
email=user_data["email"],
password=user_data["password"],
full_name=user_data["full_name"],
db=test_db
)
users.append(user)
return users
except ImportError:
pytest.skip("AuthService not available")
@pytest.fixture
async def authenticated_user(client, valid_user_data):
"""Create an authenticated user and return user info, tokens, and headers"""
# Register user
register_response = client.post("/auth/register", json=valid_user_data)
assert register_response.status_code == 200
# Login user
login_data = {
"email": valid_user_data["email"],
"password": valid_user_data["password"]
}
login_response = client.post("/auth/login", json=login_data)
assert login_response.status_code == 200
token_data = login_response.json()
return {
"user": register_response.json(),
"tokens": token_data,
"access_token": token_data["access_token"],
"refresh_token": token_data["refresh_token"],
"headers": {"Authorization": f"Bearer {token_data['access_token']}"}
}
# ================================================================
# CONFIGURATION FIXTURES
# ================================================================
@pytest.fixture
def test_settings():
"""Test-specific settings override"""
try:
from app.core.config import settings
original_settings = {}
# Store original values
test_overrides = {
'JWT_ACCESS_TOKEN_EXPIRE_MINUTES': 30,
'JWT_REFRESH_TOKEN_EXPIRE_DAYS': 7,
'PASSWORD_MIN_LENGTH': 8,
'PASSWORD_REQUIRE_UPPERCASE': True,
'PASSWORD_REQUIRE_LOWERCASE': True,
'PASSWORD_REQUIRE_NUMBERS': True,
'PASSWORD_REQUIRE_SYMBOLS': False,
'MAX_LOGIN_ATTEMPTS': 5,
'LOCKOUT_DURATION_MINUTES': 30,
'BCRYPT_ROUNDS': 4, # Lower for faster tests
}
for key, value in test_overrides.items():
if hasattr(settings, key):
original_settings[key] = getattr(settings, key)
setattr(settings, key, value)
yield settings
# Restore original values
for key, value in original_settings.items():
setattr(settings, key, value)
except ImportError:
pytest.skip("Settings not available")
# ================================================================
# PATCHING FIXTURES
# ================================================================
@pytest.fixture
def patch_redis(mock_redis):
"""Patch Redis client for all tests"""
with patch('app.core.security.redis_client', mock_redis):
yield mock_redis
@pytest.fixture
def patch_messaging(mock_rabbitmq):
"""Patch messaging system for all tests"""
with patch('app.services.messaging.publisher', mock_rabbitmq):
yield mock_rabbitmq
@pytest.fixture
def patch_external_apis(mock_external_services):
"""Patch external API calls"""
yield mock_external_services
# ================================================================
# UTILITY FIXTURES
# ================================================================
@pytest.fixture
def auth_headers():
"""Factory for creating authorization headers"""
def _create_headers(token):
return {"Authorization": f"Bearer {token}"}
return _create_headers
@pytest.fixture
def password_generator():
"""Generate passwords with different characteristics"""
def _generate(
length=12,
include_upper=True,
include_lower=True,
include_numbers=True,
include_symbols=False
):
import random
import string
chars = ""
password = ""
if include_lower:
chars += string.ascii_lowercase
password += random.choice(string.ascii_lowercase)
if include_upper:
chars += string.ascii_uppercase
password += random.choice(string.ascii_uppercase)
if include_numbers:
chars += string.digits
password += random.choice(string.digits)
if include_symbols:
chars += "!@#$%^&*"
password += random.choice("!@#$%^&*")
# Fill remaining length
remaining = length - len(password)
if remaining > 0:
password += ''.join(random.choice(chars) for _ in range(remaining))
# Shuffle the password
password_list = list(password)
random.shuffle(password_list)
return ''.join(password_list)
return _generate
# ================================================================
# PERFORMANCE TESTING FIXTURES
# ================================================================
@pytest.fixture
def performance_timer():
"""Timer utility for performance testing"""
import time
class Timer:
def __init__(self):
self.start_time = None
self.end_time = None
def start(self):
self.start_time = time.time()
def stop(self):
self.end_time = time.time()
@property
def elapsed(self):
if self.start_time and self.end_time:
return self.end_time - self.start_time
return None
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.stop()
return Timer
# ================================================================
# DATABASE UTILITY FIXTURES
# ================================================================
@pytest.fixture
async def db_utils(test_db):
"""Database utility functions for testing"""
class DBUtils:
def __init__(self, db):
self.db = db
async def count_users(self):
try:
from sqlalchemy import select, func
from app.models.users import User
result = await self.db.execute(select(func.count(User.id)))
return result.scalar()
except ImportError:
return 0
async def get_user_by_email(self, email):
try:
from sqlalchemy import select
from app.models.users import User
result = await self.db.execute(select(User).where(User.email == email))
return result.scalar_one_or_none()
except ImportError:
return None
async def count_refresh_tokens(self):
try:
from sqlalchemy import select, func
from app.models.users import RefreshToken
result = await self.db.execute(select(func.count(RefreshToken.id)))
return result.scalar()
except ImportError:
return 0
async def clear_all_data(self):
try:
from app.models.users import User, RefreshToken
await self.db.execute(RefreshToken.__table__.delete())
await self.db.execute(User.__table__.delete())
await self.db.commit()
except ImportError:
pass
return DBUtils(test_db)
# ================================================================
# LOGGING FIXTURES
# ================================================================
@pytest.fixture
def capture_logs():
"""Capture logs for testing"""
import logging
from io import StringIO
log_capture = StringIO()
handler = logging.StreamHandler(log_capture)
handler.setLevel(logging.DEBUG)
# Add handler to auth service loggers
loggers = [
logging.getLogger('app.services.auth_service'),
logging.getLogger('app.core.security'),
logging.getLogger('app.api.auth'),
]
for logger in loggers:
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
yield log_capture
# Clean up
for logger in loggers:
logger.removeHandler(handler)
# ================================================================
# TEST MARKERS AND CONFIGURATION
# ================================================================
def pytest_configure(config):
"""Configure pytest with custom markers"""
config.addinivalue_line(
"markers", "unit: marks tests as unit tests"
)
config.addinivalue_line(
"markers", "integration: marks tests as integration tests"
)
config.addinivalue_line(
"markers", "api: marks tests as API tests"
)
config.addinivalue_line(
"markers", "security: marks tests as security tests"
)
config.addinivalue_line(
"markers", "performance: marks tests as performance tests"
)
config.addinivalue_line(
"markers", "slow: marks tests as slow running"
)
config.addinivalue_line(
"markers", "auth: marks tests as authentication tests"
)
def pytest_collection_modifyitems(config, items):
"""Modify test collection to add markers automatically"""
for item in items:
# Add markers based on test class or function names
if "test_api" in item.name.lower() or "API" in str(item.cls):
item.add_marker(pytest.mark.api)
if "test_security" in item.name.lower() or "Security" in str(item.cls):
item.add_marker(pytest.mark.security)
if "test_performance" in item.name.lower() or "Performance" in str(item.cls):
item.add_marker(pytest.mark.performance)
item.add_marker(pytest.mark.slow)
if "integration" in item.name.lower() or "Integration" in str(item.cls):
item.add_marker(pytest.mark.integration)
if "Flow" in str(item.cls) or "flow" in item.name.lower():
item.add_marker(pytest.mark.integration)
if "auth" in item.name.lower() or "Auth" in str(item.cls):
item.add_marker(pytest.mark.auth)
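
For reference, a short sketch of how a test module might consume the fixtures and markers above. The /auth/register, /auth/login and /users/me paths follow the authenticated_user fixture and the deleted tests below; the expected rejection codes for weak passwords are an assumption.

import pytest


@pytest.mark.api
@pytest.mark.auth
def test_register_and_login_flow(client, valid_user_data, auth_headers):
    # Register a new user through the API (client overrides get_db with the test session)
    register = client.post("/auth/register", json=valid_user_data)
    assert register.status_code == 200

    # Log in with the same credentials
    login = client.post("/auth/login", json={
        "email": valid_user_data["email"],
        "password": valid_user_data["password"],
    })
    assert login.status_code == 200
    tokens = login.json()
    assert "access_token" in tokens

    # Use the auth_headers factory to call a protected endpoint
    me = client.get("/users/me", headers=auth_headers(tokens["access_token"]))
    assert me.status_code == 200


@pytest.mark.security
def test_weak_passwords_rejected(client, weak_password_data):
    # Each weak password variant should be rejected at registration;
    # the exact status code (validation vs. policy error) is an assumption
    for payload in weak_password_data:
        response = client.post("/auth/register", json=payload)
        assert response.status_code in (400, 422)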

View File

@@ -0,0 +1,19 @@
[pytest]
minversion = 6.0
addopts = -ra -q --disable-warnings
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
markers =
unit: Unit tests
integration: Integration tests
api: API endpoint tests
security: Security tests
performance: Performance tests
slow: Slow running tests
auth: Authentication tests
asyncio_mode = auto
filterwarnings =
ignore::DeprecationWarning
ignore::PendingDeprecationWarning
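
Because asyncio_mode is set to auto, coroutine tests and fixtures are handled by pytest-asyncio without per-test decorators. An illustrative sketch, assuming the fixtures from conftest.py above:

import asyncio
import pytest


@pytest.mark.unit
async def test_async_collection_without_decorator():
    # Collected and run as a coroutine without @pytest.mark.asyncio,
    # using the event_loop fixture defined in conftest.py
    await asyncio.sleep(0)


@pytest.mark.unit
def test_generated_password_meets_policy(password_generator):
    # password_generator comes from conftest.py; its defaults include
    # uppercase, lowercase and digits, no symbols
    password = password_generator(length=12)
    assert len(password) == 12
    assert any(c.isupper() for c in password)
    assert any(c.islower() for c in password)
    assert any(c.isdigit() for c in password)

The same marker names combine into expressions such as -m "not slow", which the run_tests.py script below passes for its fast suite.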

services/auth/tests/run_tests.py (Executable file, 785 additions)
View File

@@ -0,0 +1,785 @@
#!/usr/bin/env python3
# ================================================================
# services/auth/tests/run_tests.py
# Complete test runner script for auth service with comprehensive reporting
# ================================================================
"""
Comprehensive test runner for authentication service
Provides various test execution modes and detailed reporting
"""
import os
import sys
import subprocess
import argparse
import time
import json
from pathlib import Path
from typing import List, Dict, Optional, Tuple
from datetime import datetime
# Add the project root to Python path
project_root = Path(__file__).parent.parent.parent.parent
sys.path.insert(0, str(project_root))
class Colors:
"""ANSI color codes for terminal output"""
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
MAGENTA = '\033[95m'
CYAN = '\033[96m'
WHITE = '\033[97m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
@classmethod
def colorize(cls, text: str, color: str) -> str:
"""Colorize text for terminal output"""
return f"{color}{text}{cls.END}"
class TestMetrics:
"""Track test execution metrics"""
def __init__(self):
self.start_time = None
self.end_time = None
self.tests_run = 0
self.tests_passed = 0
self.tests_failed = 0
self.tests_skipped = 0
self.coverage_percentage = 0.0
self.warnings_count = 0
self.errors = []
def start(self):
"""Start timing"""
self.start_time = time.time()
def stop(self):
"""Stop timing"""
self.end_time = time.time()
@property
def duration(self) -> float:
"""Get duration in seconds"""
if self.start_time and self.end_time:
return self.end_time - self.start_time
return 0.0
@property
def success_rate(self) -> float:
"""Get success rate percentage"""
if self.tests_run > 0:
return (self.tests_passed / self.tests_run) * 100
return 0.0
class AuthTestRunner:
"""Test runner for authentication service with enhanced features"""
def __init__(self, test_dir: str = "tests"):
self.test_dir = Path(test_dir)
self.project_root = Path(__file__).parent.parent
self.results: Dict[str, TestMetrics] = {}
self.overall_metrics = TestMetrics()
def _print_header(self, title: str, char: str = "=", width: int = 80):
"""Print a formatted header"""
print(Colors.colorize(char * width, Colors.CYAN))
centered_title = title.center(width)
print(Colors.colorize(centered_title, Colors.BOLD + Colors.WHITE))
print(Colors.colorize(char * width, Colors.CYAN))
def _print_step(self, message: str, emoji: str = "📋"):
"""Print a step message"""
print(f"\n{emoji} {Colors.colorize(message, Colors.BLUE)}")
def _print_success(self, message: str):
"""Print success message"""
print(f"{Colors.colorize(message, Colors.GREEN)}")
def _print_error(self, message: str):
"""Print error message"""
print(f"{Colors.colorize(message, Colors.RED)}")
def _print_warning(self, message: str):
"""Print warning message"""
print(f"⚠️ {Colors.colorize(message, Colors.YELLOW)}")
def run_command(self, cmd: List[str], capture_output: bool = True, timeout: int = 300) -> subprocess.CompletedProcess:
"""Run a command and return the result"""
cmd_str = ' '.join(cmd)
print(f"🚀 Running: {Colors.colorize(cmd_str, Colors.MAGENTA)}")
try:
result = subprocess.run(
cmd,
capture_output=capture_output,
text=True,
cwd=self.project_root,
timeout=timeout
)
return result
except subprocess.TimeoutExpired:
self._print_error(f"Test execution timed out ({timeout} seconds)")
return subprocess.CompletedProcess(cmd, 1, "", "Timeout")
except Exception as e:
self._print_error(f"Error running command: {e}")
return subprocess.CompletedProcess(cmd, 1, "", str(e))
def _parse_pytest_output(self, output: str) -> TestMetrics:
"""Parse pytest output to extract metrics"""
metrics = TestMetrics()
lines = output.split('\n')
for line in lines:
line = line.strip()
# Parse test results line (e.g., "45 passed, 2 failed, 1 skipped in 12.34s")
if ' passed' in line or ' failed' in line:
parts = line.split()
for i, part in enumerate(parts):
if part.isdigit():
count = int(part)
if i + 1 < len(parts):
result_type = parts[i + 1]
if 'passed' in result_type:
metrics.tests_passed = count
elif 'failed' in result_type:
metrics.tests_failed = count
elif 'skipped' in result_type:
metrics.tests_skipped = count
elif 'warning' in result_type:
metrics.warnings_count = count
# Parse coverage percentage
if 'TOTAL' in line and '%' in line:
parts = line.split()
for part in parts:
if '%' in part:
try:
metrics.coverage_percentage = float(part.replace('%', ''))
except ValueError:
pass
metrics.tests_run = metrics.tests_passed + metrics.tests_failed + metrics.tests_skipped
return metrics
def run_all_tests(self, verbose: bool = True) -> bool:
"""Run all authentication tests"""
self._print_step("Running all authentication tests", "🧪")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v" if verbose else "-q",
"--tb=short",
"--strict-markers",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=not verbose)
metrics.stop()
if not verbose and result.stdout:
parsed_metrics = self._parse_pytest_output(result.stdout)
metrics.tests_run = parsed_metrics.tests_run
metrics.tests_passed = parsed_metrics.tests_passed
metrics.tests_failed = parsed_metrics.tests_failed
metrics.tests_skipped = parsed_metrics.tests_skipped
self.results['all_tests'] = metrics
success = result.returncode == 0
if success:
self._print_success(f"All tests completed successfully ({metrics.duration:.2f}s)")
else:
self._print_error(f"Some tests failed ({metrics.duration:.2f}s)")
return success
def run_unit_tests(self) -> bool:
"""Run unit tests only"""
self._print_step("Running unit tests", "🔬")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v", "-m", "unit",
"--tb=short",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=False)
metrics.stop()
self.results['unit_tests'] = metrics
return result.returncode == 0
def run_integration_tests(self) -> bool:
"""Run integration tests only"""
self._print_step("Running integration tests", "🔗")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v", "-m", "integration",
"--tb=short",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=False)
metrics.stop()
self.results['integration_tests'] = metrics
return result.returncode == 0
def run_api_tests(self) -> bool:
"""Run API endpoint tests only"""
self._print_step("Running API tests", "🌐")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v", "-m", "api",
"--tb=short",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=False)
metrics.stop()
self.results['api_tests'] = metrics
return result.returncode == 0
def run_security_tests(self) -> bool:
"""Run security tests only"""
self._print_step("Running security tests", "🔒")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v", "-m", "security",
"--tb=short",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=False)
metrics.stop()
self.results['security_tests'] = metrics
return result.returncode == 0
def run_performance_tests(self) -> bool:
"""Run performance tests only"""
self._print_step("Running performance tests", "")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v", "-m", "performance",
"--tb=short",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=False)
metrics.stop()
self.results['performance_tests'] = metrics
return result.returncode == 0
def run_coverage_tests(self) -> bool:
"""Run tests with coverage reporting"""
self._print_step("Running tests with coverage", "📊")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"--cov=app",
"--cov-report=html:htmlcov",
"--cov-report=term-missing",
"--cov-report=xml",
"--cov-branch",
"-v",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=True)
metrics.stop()
if result.stdout:
parsed_metrics = self._parse_pytest_output(result.stdout)
metrics.coverage_percentage = parsed_metrics.coverage_percentage
print(result.stdout)
self.results['coverage_tests'] = metrics
if result.returncode == 0:
self._print_success("Coverage report generated in htmlcov/index.html")
if metrics.coverage_percentage > 0:
self._print_success(f"Coverage: {metrics.coverage_percentage:.1f}%")
return result.returncode == 0
def run_fast_tests(self) -> bool:
"""Run fast tests (exclude slow/performance tests)"""
self._print_step("Running fast tests only", "")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v", "-m", "not slow",
"--tb=short",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=False)
metrics.stop()
self.results['fast_tests'] = metrics
return result.returncode == 0
def run_specific_test(self, test_pattern: str) -> bool:
"""Run specific test by pattern"""
self._print_step(f"Running tests matching: {test_pattern}", "🎯")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v", "-k", test_pattern,
"--tb=short",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=False)
metrics.stop()
self.results[f'specific_test_{test_pattern}'] = metrics
return result.returncode == 0
def run_parallel_tests(self, num_workers: Optional[int] = None) -> bool:
"""Run tests in parallel"""
if num_workers is None:
num_workers_str = "auto"
else:
num_workers_str = str(num_workers)
self._print_step(f"Running tests in parallel with {num_workers_str} workers", "🚀")
cmd = [
sys.executable, "-m", "pytest",
str(self.test_dir),
"-v", "-n", num_workers_str,
"--tb=short",
"--color=yes"
]
metrics = TestMetrics()
metrics.start()
result = self.run_command(cmd, capture_output=False)
metrics.stop()
self.results['parallel_tests'] = metrics
return result.returncode == 0
def validate_test_environment(self) -> bool:
"""Validate that the test environment is set up correctly"""
self._print_step("Validating test environment", "🔍")
validation_steps = [
("Checking pytest availability", self._check_pytest),
("Checking test files", self._check_test_files),
("Checking app module", self._check_app_module),
("Checking database module", self._check_database_module),
("Checking dependencies", self._check_dependencies),
]
all_valid = True
for step_name, step_func in validation_steps:
print(f" 📋 {step_name}...")
if step_func():
self._print_success(f" {step_name}")
else:
self._print_error(f" {step_name}")
all_valid = False
return all_valid
def _check_pytest(self) -> bool:
"""Check if pytest is available"""
try:
result = subprocess.run([sys.executable, "-m", "pytest", "--version"],
capture_output=True, text=True)
if result.returncode != 0:
return False
print(f"{result.stdout.strip()}")
return True
except Exception:
return False
def _check_test_files(self) -> bool:
"""Check if test files exist"""
test_files = list(self.test_dir.glob("test_*.py"))
if not test_files:
print(f" ❌ No test files found in {self.test_dir}")
return False
print(f" ✅ Found {len(test_files)} test files")
return True
def _check_app_module(self) -> bool:
"""Check if app module can be imported"""
try:
sys.path.insert(0, str(self.project_root))
import app
print(" ✅ App module can be imported")
return True
except ImportError as e:
print(f" ❌ Cannot import app module: {e}")
return False
def _check_database_module(self) -> bool:
"""Check database connectivity"""
try:
from app.core.database import get_db
print(" ✅ Database module available")
return True
except ImportError as e:
print(f" ⚠️ Database module not available: {e}")
return True # Non-critical for some tests
def _check_dependencies(self) -> bool:
"""Check required dependencies"""
required_packages = [
"pytest",
"pytest-asyncio",
"fastapi",
"sqlalchemy",
"pydantic"
]
missing_packages = []
for package in required_packages:
try:
__import__(package.replace('-', '_'))
except ImportError:
missing_packages.append(package)
if missing_packages:
print(f" ❌ Missing packages: {', '.join(missing_packages)}")
return False
print(f" ✅ All required packages available")
return True
def generate_test_report(self) -> None:
"""Generate a comprehensive test report"""
self._print_header("AUTH SERVICE TEST REPORT")
if not self.results:
print("No test results available")
return
# Summary table
print(f"\n{Colors.colorize('Test Category', Colors.BOLD):<25} "
f"{Colors.colorize('Status', Colors.BOLD):<12} "
f"{Colors.colorize('Duration', Colors.BOLD):<12} "
f"{Colors.colorize('Tests', Colors.BOLD):<15} "
f"{Colors.colorize('Success Rate', Colors.BOLD):<12}")
print("-" * 80)
total_duration = 0
total_tests = 0
total_passed = 0
for test_type, metrics in self.results.items():
if metrics.duration > 0:
total_duration += metrics.duration
total_tests += metrics.tests_run
total_passed += metrics.tests_passed
# Status
if metrics.tests_failed == 0 and metrics.tests_run > 0:
status = Colors.colorize("✅ PASSED", Colors.GREEN)
elif metrics.tests_run == 0:
status = Colors.colorize("⚪ SKIPPED", Colors.YELLOW)
else:
status = Colors.colorize("❌ FAILED", Colors.RED)
# Duration
duration_str = f"{metrics.duration:.2f}s"
# Tests count
if metrics.tests_run > 0:
tests_str = f"{metrics.tests_passed}/{metrics.tests_run}"
else:
tests_str = "0"
# Success rate
if metrics.tests_run > 0:
success_rate_str = f"{metrics.success_rate:.1f}%"
else:
success_rate_str = "N/A"
print(f"{test_type.replace('_', ' ').title():<25} "
f"{status:<20} "
f"{duration_str:<12} "
f"{tests_str:<15} "
f"{success_rate_str:<12}")
# Overall summary
print("-" * 80)
overall_success_rate = (total_passed / total_tests * 100) if total_tests > 0 else 0
overall_status = "✅ PASSED" if total_passed == total_tests and total_tests > 0 else "❌ FAILED"
print(f"{'OVERALL':<25} "
f"{Colors.colorize(overall_status, Colors.BOLD):<20} "
f"{total_duration:.2f}s{'':<6} "
f"{total_passed}/{total_tests}{'':11} "
f"{overall_success_rate:.1f}%")
print("\n" + "=" * 80)
# Recommendations
self._print_recommendations(overall_success_rate, total_tests)
def _print_recommendations(self, success_rate: float, total_tests: int):
"""Print recommendations based on test results"""
print(f"\n{Colors.colorize('📋 RECOMMENDATIONS', Colors.BOLD + Colors.CYAN)}")
if success_rate == 100 and total_tests > 0:
self._print_success("Excellent! All tests passed. Your auth service is ready for deployment.")
elif success_rate >= 90:
self._print_warning("Good test coverage. Review failed tests before deployment.")
elif success_rate >= 70:
self._print_warning("Moderate test coverage. Significant issues need fixing.")
else:
self._print_error("Poor test results. Major issues need addressing before deployment.")
# Specific recommendations
recommendations = []
if 'security_tests' in self.results:
security_metrics = self.results['security_tests']
if security_metrics.tests_failed > 0:
recommendations.append("🔒 Fix security test failures - critical for production")
if 'coverage_tests' in self.results:
coverage_metrics = self.results['coverage_tests']
if coverage_metrics.coverage_percentage < 80:
recommendations.append(f"📊 Increase test coverage (current: {coverage_metrics.coverage_percentage:.1f}%)")
if 'performance_tests' in self.results:
perf_metrics = self.results['performance_tests']
if perf_metrics.tests_failed > 0:
recommendations.append("⚡ Address performance issues")
if recommendations:
print("\n" + Colors.colorize("Next Steps:", Colors.BOLD))
for i, rec in enumerate(recommendations, 1):
print(f" {i}. {rec}")
def clean_test_artifacts(self) -> None:
"""Clean up test artifacts"""
self._print_step("Cleaning test artifacts", "🧹")
artifacts = [
".pytest_cache",
"htmlcov",
".coverage",
"coverage.xml",
"report.html",
"test-results.xml"
]
cleaned_count = 0
for artifact in artifacts:
artifact_path = self.project_root / artifact
if artifact_path.exists():
if artifact_path.is_dir():
import shutil
shutil.rmtree(artifact_path)
else:
artifact_path.unlink()
self._print_success(f"Removed {artifact}")
cleaned_count += 1
# Clean __pycache__ directories
pycache_count = 0
for pycache in self.project_root.rglob("__pycache__"):
import shutil
shutil.rmtree(pycache)
pycache_count += 1
# Clean .pyc files
pyc_count = 0
for pyc in self.project_root.rglob("*.pyc"):
pyc.unlink()
pyc_count += 1
if pycache_count > 0:
self._print_success(f"Removed {pycache_count} __pycache__ directories")
if pyc_count > 0:
self._print_success(f"Removed {pyc_count} .pyc files")
if cleaned_count == 0 and pycache_count == 0 and pyc_count == 0:
print(" 📁 No artifacts to clean")
else:
self._print_success("Test artifacts cleaned successfully")
def save_results_json(self, filename: str = "test_results.json") -> None:
"""Save test results to JSON file"""
results_data = {
"timestamp": datetime.now().isoformat(),
"test_categories": {}
}
for test_type, metrics in self.results.items():
results_data["test_categories"][test_type] = {
"duration": metrics.duration,
"tests_run": metrics.tests_run,
"tests_passed": metrics.tests_passed,
"tests_failed": metrics.tests_failed,
"tests_skipped": metrics.tests_skipped,
"success_rate": metrics.success_rate,
"coverage_percentage": metrics.coverage_percentage,
"warnings_count": metrics.warnings_count
}
with open(filename, 'w') as f:
json.dump(results_data, f, indent=2)
self._print_success(f"Test results saved to {filename}")
def main():
"""Main entry point for test runner"""
parser = argparse.ArgumentParser(
description="Auth Service Test Runner",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
python run_tests.py # Run all tests
python run_tests.py --test-type security # Run security tests only
python run_tests.py --coverage # Run with coverage
python run_tests.py --parallel --workers 4 # Run in parallel
python run_tests.py --pattern "test_login" # Run specific test pattern
python run_tests.py --validate # Validate environment
python run_tests.py --clean # Clean test artifacts
"""
)
parser.add_argument("--test-type",
choices=["all", "unit", "integration", "api", "security", "performance", "fast"],
default="all",
help="Type of tests to run")
parser.add_argument("--coverage", action="store_true", help="Run with coverage")
parser.add_argument("--parallel", action="store_true", help="Run tests in parallel")
parser.add_argument("--workers", type=int, help="Number of parallel workers")
parser.add_argument("--pattern", type=str, help="Run specific test pattern")
parser.add_argument("--validate", action="store_true", help="Validate test environment")
parser.add_argument("--clean", action="store_true", help="Clean test artifacts")
parser.add_argument("--verbose", action="store_true", default=True, help="Verbose output")
parser.add_argument("--save-results", action="store_true", help="Save results to JSON file")
parser.add_argument("--quiet", action="store_true", help="Quiet mode (less output)")
args = parser.parse_args()
runner = AuthTestRunner()
# Print header
if not args.quiet:
runner._print_header("🧪 AUTH SERVICE TEST RUNNER 🧪")
# Clean artifacts if requested
if args.clean:
runner.clean_test_artifacts()
return
# Validate environment if requested
if args.validate:
success = runner.validate_test_environment()
if success:
runner._print_success("Test environment validation passed")
else:
runner._print_error("Test environment validation failed")
sys.exit(0 if success else 1)
# Validate environment before running tests
if not args.quiet:
if not runner.validate_test_environment():
runner._print_error("Test environment validation failed")
sys.exit(1)
success = True
try:
runner.overall_metrics.start()
if args.pattern:
success = runner.run_specific_test(args.pattern)
elif args.coverage:
success = runner.run_coverage_tests()
elif args.parallel:
success = runner.run_parallel_tests(args.workers)
elif args.test_type == "unit":
success = runner.run_unit_tests()
elif args.test_type == "integration":
success = runner.run_integration_tests()
elif args.test_type == "api":
success = runner.run_api_tests()
elif args.test_type == "security":
success = runner.run_security_tests()
elif args.test_type == "performance":
success = runner.run_performance_tests()
elif args.test_type == "fast":
success = runner.run_fast_tests()
else: # all
success = runner.run_all_tests(args.verbose)
runner.overall_metrics.stop()
if not args.quiet:
runner.generate_test_report()
if args.save_results:
runner.save_results_json()
except KeyboardInterrupt:
runner._print_error("Tests interrupted by user")
success = False
except Exception as e:
runner._print_error(f"Error running tests: {e}")
success = False
if success:
if not args.quiet:
runner._print_success("All tests completed successfully!")
sys.exit(0)
else:
if not args.quiet:
runner._print_error("Some tests failed!")
sys.exit(1)
if __name__ == "__main__":
main()
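
The runner can also be driven programmatically, for example from a CI step. A minimal sketch, assuming it is imported from the services/auth/tests directory so that the relative "tests" path and project-root resolution in AuthTestRunner hold:

from run_tests import AuthTestRunner

runner = AuthTestRunner()
if not runner.validate_test_environment():
    raise SystemExit(1)

# Run the quick suite first, then the security tests, and print the summary table
ok = runner.run_fast_tests() and runner.run_security_tests()
runner.generate_test_report()
runner.save_results_json("ci_test_results.json")  # filename is arbitrary
raise SystemExit(0 if ok else 1)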

View File

@@ -1,79 +0,0 @@
# ================================================================
# services/auth/tests/test_auth.py
# ================================================================
"""Authentication tests"""
import pytest
from fastapi.testclient import TestClient
from sqlalchemy.ext.asyncio import AsyncSession
from app.services.auth_service import AuthService
from app.schemas.auth import UserRegistration, UserLogin
@pytest.mark.asyncio
async def test_register_user(db: AsyncSession):
"""Test user registration"""
user_data = UserRegistration(
email="test@bakery.es",
password="TestPass123",
full_name="Test User",
language="es"
)
result = await AuthService.register_user(user_data, db)
assert result.email == "test@bakery.es"
assert result.full_name == "Test User"
assert result.is_active is True
assert result.is_verified is False
@pytest.mark.asyncio
async def test_login_user(db: AsyncSession):
"""Test user login"""
# First register a user
user_data = UserRegistration(
email="test@bakery.es",
password="TestPass123",
full_name="Test User",
language="es"
)
await AuthService.register_user(user_data, db)
# Then login
login_data = UserLogin(
email="test@bakery.es",
password="TestPass123"
)
result = await AuthService.login_user(login_data, db, "127.0.0.1", "test-agent")
assert result.access_token is not None
assert result.refresh_token is not None
assert result.token_type == "bearer"
def test_register_endpoint(client: TestClient, test_user_data):
"""Test registration endpoint"""
response = client.post("/auth/register", json=test_user_data)
assert response.status_code == 200
data = response.json()
assert data["email"] == test_user_data["email"]
assert "id" in data
def test_login_endpoint(client: TestClient, test_user_data):
"""Test login endpoint"""
# First register
client.post("/auth/register", json=test_user_data)
# Then login
login_data = {
"email": test_user_data["email"],
"password": test_user_data["password"]
}
response = client.post("/auth/login", json=login_data)
assert response.status_code == 200
data = response.json()
assert "access_token" in data
assert "refresh_token" in data
assert data["token_type"] == "bearer"

File diff suppressed because it is too large

View File

@@ -1,74 +0,0 @@
# ================================================================
# services/auth/tests/test_users.py
# ================================================================
"""User management tests"""
import pytest
from fastapi.testclient import TestClient
from sqlalchemy.ext.asyncio import AsyncSession
from app.services.user_service import UserService
from app.services.auth_service import AuthService
from app.schemas.auth import UserRegistration
@pytest.mark.asyncio
async def test_get_user_by_email(db: AsyncSession):
"""Test getting user by email"""
# Create a user first
user_data = UserRegistration(
email="test@bakery.es",
password="TestPass123",
full_name="Test User",
language="es"
)
created_user = await AuthService.register_user(user_data, db)
# Get user by email
user = await UserService.get_user_by_email("test@bakery.es", db)
assert user is not None
assert user.email == "test@bakery.es"
assert str(user.id) == created_user.id
@pytest.mark.asyncio
async def test_update_user(db: AsyncSession):
"""Test updating user"""
# Create a user first
user_data = UserRegistration(
email="test@bakery.es",
password="TestPass123",
full_name="Test User",
language="es"
)
created_user = await AuthService.register_user(user_data, db)
# Update user
update_data = {
"full_name": "Updated User",
"phone": "+34987654321"
}
updated_user = await UserService.update_user(created_user.id, update_data, db)
assert updated_user.full_name == "Updated User"
assert updated_user.phone == "+34987654321"
def test_get_current_user_endpoint(client: TestClient, test_user_data):
"""Test get current user endpoint"""
# Register and login first
client.post("/auth/register", json=test_user_data)
login_response = client.post("/auth/login", json={
"email": test_user_data["email"],
"password": test_user_data["password"]
})
token = login_response.json()["access_token"]
# Get current user
headers = {"Authorization": f"Bearer {token}"}
response = client.get("/users/me", headers=headers)
assert response.status_code == 200
data = response.json()
assert data["email"] == test_user_data["email"]
assert data["full_name"] == test_user_data["full_name"]