#!/usr/bin/env python3
"""
Test runner script with comprehensive test execution options.

This script provides different test execution modes for development,
CI/CD, and performance testing scenarios.
"""

import argparse
import subprocess
import sys
import time
from pathlib import Path

# Add the project root to sys.path so the package under test is importable.
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))


class TestRunner:
    """Test runner with various execution modes."""

    def __init__(self):
        self.project_root = project_root
        self.test_dir = self.project_root / "tests"

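    # NOTE: the "unit"/"integration"/"performance" marker filters used below
    # assume those markers are registered in pytest.ini or pyproject.toml;
    # pytest warns about (or, with --strict-markers, rejects) unknown markers.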
    def run_unit_tests(self, verbose=False, coverage=True):
        """Run unit tests only."""
        print("🧪 Running unit tests...")

        cmd = [
            sys.executable, "-m", "pytest",
            "tests/unit",
            "-m", "unit",
            "--tb=short"
        ]

        if verbose:
            cmd.append("-v")

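        # The --cov* flags below come from the pytest-cov plugin, which is
        # assumed to be installed alongside pytest.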
        if coverage:
            cmd.extend([
                "--cov=detector_worker",
                "--cov-report=term-missing"
            ])

        return self._run_command(cmd)

    def run_integration_tests(self, verbose=False):
        """Run integration tests only."""
        print("🔗 Running integration tests...")

        cmd = [
            sys.executable, "-m", "pytest",
            "tests/integration",
            "-m", "integration",
            "--tb=short"
        ]

        if verbose:
            cmd.append("-v")

        return self._run_command(cmd)

    def run_performance_tests(self, verbose=False):
        """Run performance benchmarks."""
        print("🚀 Running performance tests...")

        cmd = [
            sys.executable, "-m", "pytest",
            "tests/performance",
            "-m", "performance",
            "--tb=short",
            "--durations=0"  # Show all durations
        ]

        if verbose:
            cmd.append("-v")

        return self._run_command(cmd)

    def run_all_tests(self, verbose=False, coverage=True, skip_slow=False):
        """Run the complete test suite, optionally with coverage."""
        print("🧪 Running complete test suite...")

        cmd = [
            sys.executable, "-m", "pytest",
            "tests/",
            "--tb=short",
            "--durations=10"
        ]

        if skip_slow:
            cmd.extend(["-m", "not slow"])

        if verbose:
            cmd.append("-v")

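        # --cov-fail-under=80 makes the run exit non-zero when total coverage
        # falls below 80%, so CI fails on coverage regressions.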
        if coverage:
            cmd.extend([
                "--cov=detector_worker",
                "--cov-report=term-missing",
                "--cov-report=html:htmlcov",
                "--cov-report=xml:coverage.xml",
                "--cov-fail-under=80"
            ])

        return self._run_command(cmd)

    def run_fast_tests(self, verbose=False):
        """Run only fast tests (unit tests without the slow marker)."""
        print("⚡ Running fast tests only...")

        cmd = [
            sys.executable, "-m", "pytest",
            "tests/unit",
            "-m", "unit and not slow",
            "--tb=short"
        ]

        if verbose:
            cmd.append("-v")

        return self._run_command(cmd)

    def run_specific_test(self, test_pattern, verbose=False):
        """Run the specific test(s) matching a pattern."""
        print(f"🎯 Running tests matching: {test_pattern}")

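        # -k matches a keyword expression against test names; pytest accepts
        # and/or/not in the pattern (e.g. "config and not slow").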
        cmd = [
            sys.executable, "-m", "pytest",
            "-k", test_pattern,
            "--tb=short"
        ]

        if verbose:
            cmd.append("-v")

        return self._run_command(cmd)

    def run_failed_tests(self, verbose=False):
        """Rerun only the tests that failed in the last run."""
        print("🔄 Rerunning failed tests...")

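        # --lf reads the failure list from pytest's cache (.pytest_cache);
        # if no failures were recorded, pytest runs the full suite instead.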
        cmd = [
            sys.executable, "-m", "pytest",
            "--lf",  # Last failed
            "--tb=short"
        ]

        if verbose:
            cmd.append("-v")

        return self._run_command(cmd)

    def run_with_coverage_report(self, open_browser=False):
        """Run tests and generate a detailed coverage report."""
        print("📊 Running tests with detailed coverage analysis...")

        cmd = [
            sys.executable, "-m", "pytest",
            "tests/",
            "-m", "not performance",  # Skip performance tests for coverage
            "--cov=detector_worker",
            "--cov-report=html:htmlcov",
            "--cov-report=xml:coverage.xml",
            "--cov-report=term-missing",
            "--cov-fail-under=80",
            "--tb=short"
        ]

        result = self._run_command(cmd)

        if result == 0 and open_browser:
            coverage_html = self.project_root / "htmlcov" / "index.html"
            if coverage_html.exists():
                import webbrowser
                webbrowser.open(f"file://{coverage_html}")
                print(f"📖 Coverage report opened in browser: {coverage_html}")

        return result

    def check_code_quality(self):
        """Run code quality checks (linting, formatting, typing)."""
        print("🔍 Running code quality checks...")

        # Build the list of available tools; each entry is (name, command).
        tools = []

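        # black and isort run in check/diff mode here, so no files are
        # modified in place; flake8 and mypy only report issues.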
        # Check for flake8
        if self._command_exists("flake8"):
            tools.append(("flake8", ["flake8", "detector_worker", "--max-line-length=120"]))

        # Check for black
        if self._command_exists("black"):
            tools.append(("black", ["black", "--check", "--diff", "detector_worker"]))

        # Check for isort
        if self._command_exists("isort"):
            tools.append(("isort", ["isort", "--check-only", "--diff", "detector_worker"]))

        # Check for mypy
        if self._command_exists("mypy"):
            tools.append(("mypy", ["mypy", "detector_worker", "--ignore-missing-imports"]))

        if not tools:
            print("⚠️ No code quality tools found. Install flake8, black, isort, and mypy for quality checks.")
            return 0

        all_passed = True
        for tool_name, cmd in tools:
            print(f"  Running {tool_name}...")
            result = self._run_command(cmd, capture_output=True)
            if result != 0:
                all_passed = False
                print(f"  ❌ {tool_name} failed")
            else:
                print(f"  ✅ {tool_name} passed")

        return 0 if all_passed else 1

    def _run_command(self, cmd, capture_output=False):
        """Run a command and return its exit code."""
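        # For pytest commands the return code maps to: 0 = all passed,
        # 1 = failures, 2 = interrupted, 3 = internal error, 4 = usage error,
        # 5 = no tests collected.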
        try:
            result = subprocess.run(cmd, cwd=self.project_root, capture_output=capture_output)
            return result.returncode
        except KeyboardInterrupt:
            print("\n⚠️ Tests interrupted by user")
            return 1
        except Exception as e:
            print(f"❌ Error running command: {e}")
            return 1

    def _command_exists(self, command):
        """Check if a command exists in PATH."""
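        # Probing with --version assumes the tool supports that flag;
        # flake8, black, isort, and mypy all do.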
        try:
            subprocess.run([command, "--version"], capture_output=True, check=True)
            return True
        except (subprocess.CalledProcessError, FileNotFoundError):
            return False

    def print_test_summary(self):
        """Print the test directory structure and available tests."""
        print("\n📁 Test Directory Structure:")
        print("├── tests/")
        print("│   ├── unit/            # Fast, isolated unit tests")
        print("│   ├── integration/     # Multi-component integration tests")
        print("│   ├── performance/     # Performance benchmarks")
        print("│   └── conftest.py      # Shared fixtures and configuration")
        print()

        if self.test_dir.exists():
            unit_tests = len(list((self.test_dir / "unit").rglob("test_*.py")))
            integration_tests = len(list((self.test_dir / "integration").rglob("test_*.py")))
            performance_tests = len(list((self.test_dir / "performance").rglob("test_*.py")))

            print("📊 Test Counts:")
            print(f"  Unit tests:        {unit_tests} files")
            print(f"  Integration tests: {integration_tests} files")
            print(f"  Performance tests: {performance_tests} files")
            print(f"  Total:             {unit_tests + integration_tests + performance_tests} test files")
            print()


def main():
    """Main entry point for the test runner."""
    parser = argparse.ArgumentParser(
        description="Detector Worker Test Runner",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s --all                     # Run all tests with coverage
  %(prog)s --unit                    # Run unit tests only
  %(prog)s --integration             # Run integration tests only
  %(prog)s --performance             # Run performance benchmarks
  %(prog)s --fast                    # Run only fast tests
  %(prog)s --failed                  # Rerun failed tests
  %(prog)s --specific "test_config"  # Run tests matching pattern
  %(prog)s --coverage                # Generate coverage report
  %(prog)s --quality                 # Run code quality checks
        """
    )

    parser.add_argument("--all", action="store_true", help="Run all tests")
    parser.add_argument("--unit", action="store_true", help="Run unit tests only")
    parser.add_argument("--integration", action="store_true", help="Run integration tests only")
    parser.add_argument("--performance", action="store_true", help="Run performance benchmarks")
    parser.add_argument("--fast", action="store_true", help="Run fast tests only")
    parser.add_argument("--failed", action="store_true", help="Rerun failed tests")
    parser.add_argument("--specific", metavar="PATTERN", help="Run specific tests matching pattern")
    parser.add_argument("--coverage", action="store_true", help="Generate coverage report")
    parser.add_argument("--quality", action="store_true", help="Run code quality checks")
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    parser.add_argument("--no-coverage", action="store_true", help="Skip coverage reporting")
    parser.add_argument("--skip-slow", action="store_true", help="Skip slow tests")
    parser.add_argument("--open-browser", action="store_true", help="Open coverage report in browser")
    parser.add_argument("--summary", action="store_true", help="Print test summary and exit")

    args = parser.parse_args()

    runner = TestRunner()

    if args.summary:
        runner.print_test_summary()
        return 0

    # If no test mode was specified, show help and exit.
    if not any([args.all, args.unit, args.integration, args.performance,
                args.fast, args.failed, args.specific, args.coverage, args.quality]):
        parser.print_help()
        print("\n💡 Use --summary to see available tests")
        return 0

    start_time = time.time()
    exit_code = 0

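    # Modes are mutually exclusive: if several flags are passed, the first
    # match in the elif chain below wins.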
    try:
        if args.quality:
            exit_code = runner.check_code_quality()
        elif args.unit:
            exit_code = runner.run_unit_tests(args.verbose, not args.no_coverage)
        elif args.integration:
            exit_code = runner.run_integration_tests(args.verbose)
        elif args.performance:
            exit_code = runner.run_performance_tests(args.verbose)
        elif args.fast:
            exit_code = runner.run_fast_tests(args.verbose)
        elif args.failed:
            exit_code = runner.run_failed_tests(args.verbose)
        elif args.specific:
            exit_code = runner.run_specific_test(args.specific, args.verbose)
        elif args.coverage:
            exit_code = runner.run_with_coverage_report(args.open_browser)
        elif args.all:
            exit_code = runner.run_all_tests(args.verbose, not args.no_coverage, args.skip_slow)

        end_time = time.time()
        duration = end_time - start_time

        if exit_code == 0:
            print(f"\n✅ Tests completed successfully in {duration:.1f} seconds")
        else:
            print(f"\n❌ Tests failed in {duration:.1f} seconds")

    except KeyboardInterrupt:
        print("\n⚠️ Test execution interrupted")
        exit_code = 1

    return exit_code


if __name__ == "__main__":
    sys.exit(main())