#!/usr/bin/env python3
# ================================================================
# services/auth/tests/run_tests.py
# Complete test runner script for auth service with comprehensive reporting
# ================================================================
"""
Comprehensive test runner for authentication service

Provides various test execution modes and detailed reporting
"""
import os
import sys
import subprocess
import argparse
import shutil
import time
import json
from pathlib import Path
from typing import List, Dict, Optional
from datetime import datetime

# Add the project root to Python path
project_root = Path(__file__).parent.parent.parent.parent
sys.path.insert(0, str(project_root))

class Colors:
    """ANSI color codes for terminal output"""
    RED = '\033[91m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    BLUE = '\033[94m'
    MAGENTA = '\033[95m'
    CYAN = '\033[96m'
    WHITE = '\033[97m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'

    @classmethod
    def colorize(cls, text: str, color: str) -> str:
        """Colorize text for terminal output"""
        return f"{color}{text}{cls.END}"

class TestMetrics:
    """Track test execution metrics"""

    def __init__(self):
        self.start_time = None
        self.end_time = None
        self.tests_run = 0
        self.tests_passed = 0
        self.tests_failed = 0
        self.tests_skipped = 0
        self.coverage_percentage = 0.0
        self.warnings_count = 0
        self.errors = []

    def start(self):
        """Start timing"""
        self.start_time = time.time()

    def stop(self):
        """Stop timing"""
        self.end_time = time.time()

    @property
    def duration(self) -> float:
        """Get duration in seconds"""
        if self.start_time and self.end_time:
            return self.end_time - self.start_time
        return 0.0

    @property
    def success_rate(self) -> float:
        """Get success rate percentage"""
        if self.tests_run > 0:
            return (self.tests_passed / self.tests_run) * 100
        return 0.0

class AuthTestRunner:
    """Test runner for authentication service with enhanced features"""

    def __init__(self, test_dir: str = "tests"):
        self.test_dir = Path(test_dir)
        self.project_root = Path(__file__).parent.parent
        self.results: Dict[str, TestMetrics] = {}
        self.overall_metrics = TestMetrics()

    def _print_header(self, title: str, char: str = "=", width: int = 80):
        """Print a formatted header"""
        print(Colors.colorize(char * width, Colors.CYAN))
        centered_title = title.center(width)
        print(Colors.colorize(centered_title, Colors.BOLD + Colors.WHITE))
        print(Colors.colorize(char * width, Colors.CYAN))

    def _print_step(self, message: str, emoji: str = "📋"):
        """Print a step message"""
        print(f"\n{emoji} {Colors.colorize(message, Colors.BLUE)}")

    def _print_success(self, message: str):
        """Print success message"""
        print(f"✅ {Colors.colorize(message, Colors.GREEN)}")

    def _print_error(self, message: str):
        """Print error message"""
        print(f"❌ {Colors.colorize(message, Colors.RED)}")

    def _print_warning(self, message: str):
        """Print warning message"""
        print(f"⚠️ {Colors.colorize(message, Colors.YELLOW)}")

    def run_command(self, cmd: List[str], capture_output: bool = True, timeout: int = 300) -> subprocess.CompletedProcess:
        """Run a command and return the result"""
        cmd_str = ' '.join(cmd)
        print(f"🚀 Running: {Colors.colorize(cmd_str, Colors.MAGENTA)}")
        try:
            result = subprocess.run(
                cmd,
                capture_output=capture_output,
                text=True,
                cwd=self.project_root,
                timeout=timeout
            )
            return result
        except subprocess.TimeoutExpired:
            self._print_error(f"Test execution timed out ({timeout} seconds)")
            return subprocess.CompletedProcess(cmd, 1, "", "Timeout")
        except Exception as e:
            self._print_error(f"Error running command: {e}")
            return subprocess.CompletedProcess(cmd, 1, "", str(e))

    def _parse_pytest_output(self, output: str) -> TestMetrics:
        """Parse pytest output to extract metrics"""
        metrics = TestMetrics()
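        # Best-effort parse of pytest's human-readable terminal summary; if the
        # summary format changes between pytest versions this may miss counts.
        # A --junitxml or JSON report would be a more robust source of metrics.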
        lines = output.split('\n')
        for line in lines:
            line = line.strip()

            # Parse test results line (e.g., "45 passed, 2 failed, 1 skipped in 12.34s")
            if ' passed' in line or ' failed' in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part.isdigit():
                        count = int(part)
                        if i + 1 < len(parts):
                            result_type = parts[i + 1]
                            if 'passed' in result_type:
                                metrics.tests_passed = count
                            elif 'failed' in result_type:
                                metrics.tests_failed = count
                            elif 'skipped' in result_type:
                                metrics.tests_skipped = count
                            elif 'warning' in result_type:
                                metrics.warnings_count = count

            # Parse coverage percentage
            if 'TOTAL' in line and '%' in line:
                parts = line.split()
                for part in parts:
                    if '%' in part:
                        try:
                            metrics.coverage_percentage = float(part.replace('%', ''))
                        except ValueError:
                            pass

        metrics.tests_run = metrics.tests_passed + metrics.tests_failed + metrics.tests_skipped
        return metrics

    def run_all_tests(self, verbose: bool = True) -> bool:
        """Run all authentication tests"""
        self._print_step("Running all authentication tests", "🧪")
        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v" if verbose else "-q",
            "--tb=short",
            "--strict-markers",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=not verbose)
        metrics.stop()

        if not verbose and result.stdout:
            parsed_metrics = self._parse_pytest_output(result.stdout)
            metrics.tests_run = parsed_metrics.tests_run
            metrics.tests_passed = parsed_metrics.tests_passed
            metrics.tests_failed = parsed_metrics.tests_failed
            metrics.tests_skipped = parsed_metrics.tests_skipped

        self.results['all_tests'] = metrics

        success = result.returncode == 0
        if success:
            self._print_success(f"All tests completed successfully ({metrics.duration:.2f}s)")
        else:
            self._print_error(f"Some tests failed ({metrics.duration:.2f}s)")

        return success

    def run_unit_tests(self) -> bool:
        """Run unit tests only"""
        self._print_step("Running unit tests", "🔬")
        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v", "-m", "unit",
            "--tb=short",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=False)
        metrics.stop()

        self.results['unit_tests'] = metrics
        return result.returncode == 0

    def run_integration_tests(self) -> bool:
        """Run integration tests only"""
        self._print_step("Running integration tests", "🔗")

        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v", "-m", "integration",
            "--tb=short",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=False)
        metrics.stop()

        self.results['integration_tests'] = metrics
        return result.returncode == 0

    def run_api_tests(self) -> bool:
        """Run API endpoint tests only"""
        self._print_step("Running API tests", "🌐")

        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v", "-m", "api",
            "--tb=short",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=False)
        metrics.stop()

        self.results['api_tests'] = metrics
        return result.returncode == 0

    def run_security_tests(self) -> bool:
        """Run security tests only"""
        self._print_step("Running security tests", "🔒")

        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v", "-m", "security",
            "--tb=short",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=False)
        metrics.stop()

        self.results['security_tests'] = metrics
        return result.returncode == 0

    def run_performance_tests(self) -> bool:
        """Run performance tests only"""
        self._print_step("Running performance tests", "⚡")

        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v", "-m", "performance",
            "--tb=short",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=False)
        metrics.stop()

        self.results['performance_tests'] = metrics
        return result.returncode == 0

    def run_coverage_tests(self) -> bool:
        """Run tests with coverage reporting"""
        self._print_step("Running tests with coverage", "📊")
        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "--cov=app",
            "--cov-report=html:htmlcov",
            "--cov-report=term-missing",
            "--cov-report=xml",
            "--cov-branch",
            "-v",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=True)
        metrics.stop()

        if result.stdout:
            parsed_metrics = self._parse_pytest_output(result.stdout)
            metrics.tests_run = parsed_metrics.tests_run
            metrics.tests_passed = parsed_metrics.tests_passed
            metrics.tests_failed = parsed_metrics.tests_failed
            metrics.tests_skipped = parsed_metrics.tests_skipped
            metrics.coverage_percentage = parsed_metrics.coverage_percentage
            print(result.stdout)

        self.results['coverage_tests'] = metrics

        if result.returncode == 0:
            self._print_success("Coverage report generated in htmlcov/index.html")
            if metrics.coverage_percentage > 0:
                self._print_success(f"Coverage: {metrics.coverage_percentage:.1f}%")

        return result.returncode == 0

    def run_fast_tests(self) -> bool:
        """Run fast tests (exclude slow/performance tests)"""
        self._print_step("Running fast tests only", "⚡")

        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v", "-m", "not slow",
            "--tb=short",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=False)
        metrics.stop()

        self.results['fast_tests'] = metrics
        return result.returncode == 0

    def run_specific_test(self, test_pattern: str) -> bool:
        """Run specific test by pattern"""
        self._print_step(f"Running tests matching: {test_pattern}", "🎯")

        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v", "-k", test_pattern,
            "--tb=short",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=False)
        metrics.stop()

        self.results[f'specific_test_{test_pattern}'] = metrics
        return result.returncode == 0

    def run_parallel_tests(self, num_workers: Optional[int] = None) -> bool:
        """Run tests in parallel"""
        if num_workers is None:
            num_workers_str = "auto"
        else:
            num_workers_str = str(num_workers)

        self._print_step(f"Running tests in parallel with {num_workers_str} workers", "🚀")
        cmd = [
            sys.executable, "-m", "pytest",
            str(self.test_dir),
            "-v", "-n", num_workers_str,
            "--tb=short",
            "--color=yes"
        ]

        metrics = TestMetrics()
        metrics.start()
        result = self.run_command(cmd, capture_output=False)
        metrics.stop()

        self.results['parallel_tests'] = metrics
        return result.returncode == 0

    def validate_test_environment(self) -> bool:
        """Validate that the test environment is set up correctly"""
        self._print_step("Validating test environment", "🔍")

        validation_steps = [
            ("Checking pytest availability", self._check_pytest),
            ("Checking test files", self._check_test_files),
            ("Checking app module", self._check_app_module),
            ("Checking database module", self._check_database_module),
            ("Checking dependencies", self._check_dependencies),
        ]

        all_valid = True
        for step_name, step_func in validation_steps:
            print(f"  📋 {step_name}...")
            if step_func():
                self._print_success(f"  {step_name}")
            else:
                self._print_error(f"  {step_name}")
                all_valid = False

        return all_valid

    def _check_pytest(self) -> bool:
        """Check if pytest is available"""
        try:
            result = subprocess.run([sys.executable, "-m", "pytest", "--version"],
                                    capture_output=True, text=True)
            if result.returncode != 0:
                return False
            print(f"    ✅ {result.stdout.strip()}")
            return True
        except Exception:
            return False

    def _check_test_files(self) -> bool:
        """Check if test files exist"""
        test_files = list(self.test_dir.glob("test_*.py"))
        if not test_files:
            print(f"    ❌ No test files found in {self.test_dir}")
            return False
        print(f"    ✅ Found {len(test_files)} test files")
        return True

    def _check_app_module(self) -> bool:
        """Check if app module can be imported"""
        try:
            sys.path.insert(0, str(self.project_root))
            import app
            print("    ✅ App module can be imported")
            return True
        except ImportError as e:
            print(f"    ❌ Cannot import app module: {e}")
            return False

    def _check_database_module(self) -> bool:
        """Check that the database module can be imported"""
        try:
            from app.core.database import get_db
            print("    ✅ Database module available")
            return True
        except ImportError as e:
            print(f"    ⚠️ Database module not available: {e}")
            return True  # Non-critical for some tests

    def _check_dependencies(self) -> bool:
        """Check required dependencies"""
        required_packages = [
            "pytest",
            "pytest-asyncio",
            "fastapi",
            "sqlalchemy",
            "pydantic"
        ]
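        # The check below maps distribution names to import names by replacing "-"
        # with "_", which works for the packages listed above but not for every
        # distribution whose import name differs from its PyPI name.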
        missing_packages = []
        for package in required_packages:
            try:
                __import__(package.replace('-', '_'))
            except ImportError:
                missing_packages.append(package)

        if missing_packages:
            print(f"    ❌ Missing packages: {', '.join(missing_packages)}")
            return False

        print("    ✅ All required packages available")
        return True

    def generate_test_report(self) -> None:
        """Generate a comprehensive test report"""
        self._print_header("AUTH SERVICE TEST REPORT")

        if not self.results:
            print("No test results available")
            return

        # Summary table
        print(f"\n{Colors.colorize('Test Category', Colors.BOLD):<25} "
              f"{Colors.colorize('Status', Colors.BOLD):<12} "
              f"{Colors.colorize('Duration', Colors.BOLD):<12} "
              f"{Colors.colorize('Tests', Colors.BOLD):<15} "
              f"{Colors.colorize('Success Rate', Colors.BOLD):<12}")
        print("-" * 80)

        total_duration = 0
        total_tests = 0
        total_passed = 0

        for test_type, metrics in self.results.items():
            if metrics.duration > 0:
                total_duration += metrics.duration
            total_tests += metrics.tests_run
            total_passed += metrics.tests_passed

            # Status
            if metrics.tests_failed == 0 and metrics.tests_run > 0:
                status = Colors.colorize("✅ PASSED", Colors.GREEN)
            elif metrics.tests_run == 0:
                status = Colors.colorize("⚪ SKIPPED", Colors.YELLOW)
            else:
                status = Colors.colorize("❌ FAILED", Colors.RED)

            # Duration
            duration_str = f"{metrics.duration:.2f}s"

            # Tests count
            if metrics.tests_run > 0:
                tests_str = f"{metrics.tests_passed}/{metrics.tests_run}"
            else:
                tests_str = "0"

            # Success rate
            if metrics.tests_run > 0:
                success_rate_str = f"{metrics.success_rate:.1f}%"
            else:
                success_rate_str = "N/A"

            print(f"{test_type.replace('_', ' ').title():<25} "
                  f"{status:<20} "
                  f"{duration_str:<12} "
                  f"{tests_str:<15} "
                  f"{success_rate_str:<12}")

        # Overall summary
        print("-" * 80)
        overall_success_rate = (total_passed / total_tests * 100) if total_tests > 0 else 0
        overall_status = "✅ PASSED" if total_passed == total_tests and total_tests > 0 else "❌ FAILED"

        print(f"{'OVERALL':<25} "
              f"{Colors.colorize(overall_status, Colors.BOLD):<20} "
              f"{total_duration:.2f}s{'':<6} "
              f"{total_passed}/{total_tests}{'':11} "
              f"{overall_success_rate:.1f}%")

        print("\n" + "=" * 80)

        # Recommendations
        self._print_recommendations(overall_success_rate, total_tests)

    def _print_recommendations(self, success_rate: float, total_tests: int):
        """Print recommendations based on test results"""
        print(f"\n{Colors.colorize('📋 RECOMMENDATIONS', Colors.BOLD + Colors.CYAN)}")

        if success_rate == 100 and total_tests > 0:
            self._print_success("Excellent! All tests passed. Your auth service is ready for deployment.")
        elif success_rate >= 90:
            self._print_warning("Most tests passed. Review the failed tests before deployment.")
        elif success_rate >= 70:
            self._print_warning("Moderate pass rate. Significant issues need fixing.")
        else:
            self._print_error("Poor test results. Major issues need addressing before deployment.")

        # Specific recommendations
        recommendations = []

        if 'security_tests' in self.results:
            security_metrics = self.results['security_tests']
            if security_metrics.tests_failed > 0:
                recommendations.append("🔒 Fix security test failures - critical for production")

        if 'coverage_tests' in self.results:
            coverage_metrics = self.results['coverage_tests']
            if coverage_metrics.coverage_percentage < 80:
                recommendations.append(f"📊 Increase test coverage (current: {coverage_metrics.coverage_percentage:.1f}%)")

        if 'performance_tests' in self.results:
            perf_metrics = self.results['performance_tests']
            if perf_metrics.tests_failed > 0:
                recommendations.append("⚡ Address performance issues")

        if recommendations:
            print("\n" + Colors.colorize("Next Steps:", Colors.BOLD))
            for i, rec in enumerate(recommendations, 1):
                print(f"  {i}. {rec}")

    def clean_test_artifacts(self) -> None:
        """Clean up test artifacts"""
        self._print_step("Cleaning test artifacts", "🧹")
        artifacts = [
            ".pytest_cache",
            "htmlcov",
            ".coverage",
            "coverage.xml",
            "report.html",
            "test-results.xml"
        ]

        cleaned_count = 0
        for artifact in artifacts:
            artifact_path = self.project_root / artifact
            if artifact_path.exists():
                if artifact_path.is_dir():
                    shutil.rmtree(artifact_path)
                else:
                    artifact_path.unlink()
                self._print_success(f"Removed {artifact}")
                cleaned_count += 1

        # Clean __pycache__ directories
        pycache_count = 0
        for pycache in self.project_root.rglob("__pycache__"):
            shutil.rmtree(pycache)
            pycache_count += 1

        # Clean .pyc files
        pyc_count = 0
        for pyc in self.project_root.rglob("*.pyc"):
            pyc.unlink()
            pyc_count += 1

        if pycache_count > 0:
            self._print_success(f"Removed {pycache_count} __pycache__ directories")
        if pyc_count > 0:
            self._print_success(f"Removed {pyc_count} .pyc files")

        if cleaned_count == 0 and pycache_count == 0 and pyc_count == 0:
            print("  📁 No artifacts to clean")
        else:
            self._print_success("Test artifacts cleaned successfully")

    def save_results_json(self, filename: str = "test_results.json") -> None:
        """Save test results to JSON file"""
        results_data = {
            "timestamp": datetime.now().isoformat(),
            "test_categories": {}
        }

        for test_type, metrics in self.results.items():
            results_data["test_categories"][test_type] = {
                "duration": metrics.duration,
                "tests_run": metrics.tests_run,
                "tests_passed": metrics.tests_passed,
                "tests_failed": metrics.tests_failed,
                "tests_skipped": metrics.tests_skipped,
                "success_rate": metrics.success_rate,
                "coverage_percentage": metrics.coverage_percentage,
                "warnings_count": metrics.warnings_count
            }

        with open(filename, 'w') as f:
            json.dump(results_data, f, indent=2)

        self._print_success(f"Test results saved to {filename}")

def main():
    """Main entry point for test runner"""
    parser = argparse.ArgumentParser(
        description="Auth Service Test Runner",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python run_tests.py                          # Run all tests
  python run_tests.py --test-type security     # Run security tests only
  python run_tests.py --coverage               # Run with coverage
  python run_tests.py --parallel --workers 4   # Run in parallel
  python run_tests.py --pattern "test_login"   # Run specific test pattern
  python run_tests.py --validate               # Validate environment
  python run_tests.py --clean                  # Clean test artifacts
        """
    )

    parser.add_argument("--test-type",
                        choices=["all", "unit", "integration", "api", "security", "performance", "fast"],
                        default="all",
                        help="Type of tests to run")
    parser.add_argument("--coverage", action="store_true", help="Run with coverage")
    parser.add_argument("--parallel", action="store_true", help="Run tests in parallel")
    parser.add_argument("--workers", type=int, help="Number of parallel workers")
    parser.add_argument("--pattern", type=str, help="Run specific test pattern")
    parser.add_argument("--validate", action="store_true", help="Validate test environment")
    parser.add_argument("--clean", action="store_true", help="Clean test artifacts")
    parser.add_argument("--verbose", action="store_true", default=True, help="Verbose output")
    parser.add_argument("--save-results", action="store_true", help="Save results to JSON file")
    parser.add_argument("--quiet", action="store_true", help="Quiet mode (less output)")

    args = parser.parse_args()

    runner = AuthTestRunner()

    # Print header
    if not args.quiet:
        runner._print_header("🧪 AUTH SERVICE TEST RUNNER 🧪")

    # Clean artifacts if requested
    if args.clean:
        runner.clean_test_artifacts()
        return

    # Validate environment if requested
    if args.validate:
        success = runner.validate_test_environment()
        if success:
            runner._print_success("Test environment validation passed")
        else:
            runner._print_error("Test environment validation failed")
        sys.exit(0 if success else 1)

    # Validate environment before running tests
    if not args.quiet:
        if not runner.validate_test_environment():
            runner._print_error("Test environment validation failed")
            sys.exit(1)

    success = True

    try:
        runner.overall_metrics.start()
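        # Dispatch order: an explicit --pattern, --coverage, or --parallel flag
        # takes precedence over --test-type.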
        if args.pattern:
            success = runner.run_specific_test(args.pattern)
        elif args.coverage:
            success = runner.run_coverage_tests()
        elif args.parallel:
            success = runner.run_parallel_tests(args.workers)
        elif args.test_type == "unit":
            success = runner.run_unit_tests()
        elif args.test_type == "integration":
            success = runner.run_integration_tests()
        elif args.test_type == "api":
            success = runner.run_api_tests()
        elif args.test_type == "security":
            success = runner.run_security_tests()
        elif args.test_type == "performance":
            success = runner.run_performance_tests()
        elif args.test_type == "fast":
            success = runner.run_fast_tests()
        else:  # all
            success = runner.run_all_tests(args.verbose)

        runner.overall_metrics.stop()

        if not args.quiet:
            runner.generate_test_report()

        if args.save_results:
            runner.save_results_json()

    except KeyboardInterrupt:
        runner._print_error("Tests interrupted by user")
        success = False
    except Exception as e:
        runner._print_error(f"Error running tests: {e}")
        success = False

    if success:
        if not args.quiet:
            runner._print_success("All tests completed successfully!")
        sys.exit(0)
    else:
        if not args.quiet:
            runner._print_error("Some tests failed!")
        sys.exit(1)

if __name__ == "__main__":
    main()