Refactor: PHASE 8: Testing & Integration
This commit is contained in:
parent
af34f4fd08
commit
9e8c6804a7
32 changed files with 17128 additions and 0 deletions
19
tests/performance/__init__.py
Normal file
19
tests/performance/__init__.py
Normal file
|
@ -0,0 +1,19 @@
|
|||
"""
|
||||
Performance tests for the detector worker application.
|
||||
|
||||
This package contains performance benchmarks and load tests to ensure
|
||||
the application meets scalability and throughput requirements.
|
||||
"""
|
||||
|
||||
# Performance test modules
|
||||
from . import (
|
||||
test_detection_performance,
|
||||
test_websocket_performance,
|
||||
test_storage_performance
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"test_detection_performance",
|
||||
"test_websocket_performance",
|
||||
"test_storage_performance"
|
||||
]
|
672
tests/performance/test_detection_performance.py
Normal file
672
tests/performance/test_detection_performance.py
Normal file
|
@ -0,0 +1,672 @@
|
|||
"""
|
||||
Performance tests for detection pipeline components.
|
||||
|
||||
These tests benchmark the performance of key detection pipeline
|
||||
components to ensure they meet performance requirements.
|
||||
"""
|
||||
import pytest
|
||||
import time
|
||||
import asyncio
|
||||
import statistics
|
||||
from unittest.mock import Mock, patch
|
||||
import numpy as np
|
||||
import psutil
|
||||
import gc
|
||||
|
||||
from detector_worker.detection.yolo_detector import YOLODetector
|
||||
from detector_worker.detection.tracking_manager import TrackingManager
|
||||
from detector_worker.detection.stability_validator import StabilityValidator
|
||||
from detector_worker.pipeline.pipeline_executor import PipelineExecutor
|
||||
from detector_worker.models.model_manager import ModelManager
|
||||
from detector_worker.streams.stream_manager import StreamManager
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_frame():
|
||||
"""Create a sample frame for performance testing."""
|
||||
return np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def large_frame():
|
||||
"""Create a large frame for stress testing."""
|
||||
return np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def performance_config():
|
||||
"""Configuration for performance tests."""
|
||||
return {
|
||||
"target_fps": 30,
|
||||
"max_detection_time_ms": 100,
|
||||
"max_tracking_time_ms": 50,
|
||||
"max_pipeline_time_ms": 500,
|
||||
"memory_limit_mb": 1024
|
||||
}
|
||||
|
||||
|
||||
class TestDetectionPerformance:
|
||||
"""Test detection performance benchmarks."""
|
||||
|
||||
def test_yolo_detection_speed(self, sample_frame, performance_config):
|
||||
"""Benchmark YOLO detection speed."""
|
||||
|
||||
detector = YOLODetector()
|
||||
|
||||
with patch('torch.load') as mock_torch_load:
|
||||
# Setup fast mock model
|
||||
mock_model = Mock()
|
||||
mock_result = Mock()
|
||||
mock_result.boxes = Mock()
|
||||
mock_result.boxes.xyxy = Mock()
|
||||
mock_result.boxes.conf = Mock()
|
||||
mock_result.boxes.cls = Mock()
|
||||
mock_result.names = {0: "car", 1: "person"}
|
||||
|
||||
# Mock detection results
|
||||
mock_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
|
||||
[100, 200, 300, 400],
|
||||
[150, 250, 350, 450]
|
||||
])
|
||||
mock_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.9, 0.8])
|
||||
mock_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1])
|
||||
|
||||
mock_model.return_value = mock_result
|
||||
mock_torch_load.return_value = mock_model
|
||||
|
||||
# Warm up
|
||||
for _ in range(5):
|
||||
detector.detect(sample_frame, confidence_threshold=0.5)
|
||||
|
||||
# Benchmark detection speed
|
||||
detection_times = []
|
||||
num_iterations = 100
|
||||
|
||||
for _ in range(num_iterations):
|
||||
start_time = time.perf_counter()
|
||||
detections = detector.detect(sample_frame, confidence_threshold=0.5)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
detection_time_ms = (end_time - start_time) * 1000
|
||||
detection_times.append(detection_time_ms)
|
||||
|
||||
# Calculate statistics
|
||||
avg_detection_time = statistics.mean(detection_times)
|
||||
median_detection_time = statistics.median(detection_times)
|
||||
max_detection_time = max(detection_times)
|
||||
min_detection_time = min(detection_times)
|
||||
|
||||
# Performance assertions
|
||||
assert avg_detection_time < performance_config["max_detection_time_ms"]
|
||||
assert median_detection_time < performance_config["max_detection_time_ms"]
|
||||
|
||||
# Calculate theoretical FPS
|
||||
theoretical_fps = 1000 / avg_detection_time
|
||||
assert theoretical_fps >= performance_config["target_fps"]
|
||||
|
||||
print(f"\nDetection Performance Metrics:")
|
||||
print(f"Average detection time: {avg_detection_time:.2f} ms")
|
||||
print(f"Median detection time: {median_detection_time:.2f} ms")
|
||||
print(f"Min detection time: {min_detection_time:.2f} ms")
|
||||
print(f"Max detection time: {max_detection_time:.2f} ms")
|
||||
print(f"Theoretical FPS: {theoretical_fps:.1f}")
|
||||
|
||||
def test_tracking_performance(self, sample_frame, performance_config):
|
||||
"""Benchmark object tracking performance."""
|
||||
|
||||
tracking_manager = TrackingManager()
|
||||
|
||||
# Create mock detections
|
||||
detections = [
|
||||
{"class": "car", "confidence": 0.9, "bbox": [100, 200, 300, 400]},
|
||||
{"class": "car", "confidence": 0.8, "bbox": [150, 250, 350, 450]},
|
||||
{"class": "person", "confidence": 0.7, "bbox": [200, 300, 250, 400]}
|
||||
]
|
||||
|
||||
# Warm up tracking
|
||||
for i in range(10):
|
||||
tracking_manager.update_tracks(detections, frame_id=i)
|
||||
|
||||
# Benchmark tracking speed
|
||||
tracking_times = []
|
||||
num_iterations = 100
|
||||
|
||||
for i in range(num_iterations):
|
||||
# Simulate moving detections
|
||||
moving_detections = []
|
||||
for det in detections:
|
||||
moved_det = det.copy()
|
||||
# Add small random movement
|
||||
bbox = moved_det["bbox"]
|
||||
moved_det["bbox"] = [
|
||||
bbox[0] + np.random.randint(-5, 5),
|
||||
bbox[1] + np.random.randint(-5, 5),
|
||||
bbox[2] + np.random.randint(-5, 5),
|
||||
bbox[3] + np.random.randint(-5, 5)
|
||||
]
|
||||
moving_detections.append(moved_det)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
tracks = tracking_manager.update_tracks(moving_detections, frame_id=i + 10)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
tracking_time_ms = (end_time - start_time) * 1000
|
||||
tracking_times.append(tracking_time_ms)
|
||||
|
||||
# Calculate statistics
|
||||
avg_tracking_time = statistics.mean(tracking_times)
|
||||
max_tracking_time = max(tracking_times)
|
||||
|
||||
# Performance assertions
|
||||
assert avg_tracking_time < performance_config["max_tracking_time_ms"]
|
||||
assert max_tracking_time < performance_config["max_tracking_time_ms"] * 2
|
||||
|
||||
print(f"\nTracking Performance Metrics:")
|
||||
print(f"Average tracking time: {avg_tracking_time:.2f} ms")
|
||||
print(f"Max tracking time: {max_tracking_time:.2f} ms")
|
||||
|
||||
def test_stability_validation_performance(self, performance_config):
|
||||
"""Benchmark stability validation performance."""
|
||||
|
||||
validator = StabilityValidator()
|
||||
|
||||
# Create stable detections sequence
|
||||
base_detection = {
|
||||
"class": "car",
|
||||
"confidence": 0.9,
|
||||
"bbox": [100, 200, 300, 400],
|
||||
"track_id": 1001
|
||||
}
|
||||
|
||||
# Add sequence of stable detections
|
||||
for i in range(20):
|
||||
detection = base_detection.copy()
|
||||
# Add small variations to simulate real detection noise
|
||||
detection["confidence"] = 0.9 + np.random.normal(0, 0.02)
|
||||
bbox = detection["bbox"]
|
||||
detection["bbox"] = [
|
||||
bbox[0] + np.random.normal(0, 2),
|
||||
bbox[1] + np.random.normal(0, 2),
|
||||
bbox[2] + np.random.normal(0, 2),
|
||||
bbox[3] + np.random.normal(0, 2)
|
||||
]
|
||||
|
||||
validator.add_detection(detection, frame_id=i)
|
||||
|
||||
# Benchmark validation performance
|
||||
validation_times = []
|
||||
num_iterations = 1000
|
||||
|
||||
for i in range(num_iterations):
|
||||
test_detection = base_detection.copy()
|
||||
test_detection["confidence"] = 0.85 + np.random.normal(0, 0.05)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
is_stable = validator.is_detection_stable(
|
||||
test_detection,
|
||||
stability_frames=10,
|
||||
confidence_threshold=0.8
|
||||
)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
validation_time_ms = (end_time - start_time) * 1000
|
||||
validation_times.append(validation_time_ms)
|
||||
|
||||
avg_validation_time = statistics.mean(validation_times)
|
||||
max_validation_time = max(validation_times)
|
||||
|
||||
# Should be very fast (< 1ms typically)
|
||||
assert avg_validation_time < 1.0
|
||||
assert max_validation_time < 5.0
|
||||
|
||||
print(f"\nStability Validation Performance Metrics:")
|
||||
print(f"Average validation time: {avg_validation_time:.3f} ms")
|
||||
print(f"Max validation time: {max_validation_time:.3f} ms")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_pipeline_executor_performance(self, sample_frame, performance_config):
|
||||
"""Benchmark complete pipeline execution performance."""
|
||||
|
||||
pipeline_executor = PipelineExecutor()
|
||||
|
||||
# Simple pipeline configuration
|
||||
pipeline_config = {
|
||||
"modelId": "fast_detection_model",
|
||||
"modelFile": "fast_model.pt",
|
||||
"expectedClasses": ["car"],
|
||||
"minConfidence": 0.5,
|
||||
"actions": [],
|
||||
"branches": []
|
||||
}
|
||||
|
||||
detection_context = {
|
||||
"camera_id": "perf_camera",
|
||||
"display_id": "perf_display",
|
||||
"frame": sample_frame,
|
||||
"timestamp": int(time.time() * 1000),
|
||||
"session_id": "perf_session"
|
||||
}
|
||||
|
||||
with patch('torch.load') as mock_torch_load, \
|
||||
patch('os.path.exists', return_value=True):
|
||||
|
||||
# Setup fast mock model
|
||||
mock_model = Mock()
|
||||
mock_result = Mock()
|
||||
mock_result.boxes = Mock()
|
||||
mock_result.boxes.xyxy = Mock()
|
||||
mock_result.boxes.conf = Mock()
|
||||
mock_result.boxes.cls = Mock()
|
||||
mock_result.names = {0: "car"}
|
||||
|
||||
mock_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([[100, 200, 300, 400]])
|
||||
mock_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.9])
|
||||
mock_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0])
|
||||
|
||||
mock_model.return_value = mock_result
|
||||
mock_torch_load.return_value = mock_model
|
||||
|
||||
# Warm up
|
||||
for _ in range(3):
|
||||
await pipeline_executor.execute_pipeline(pipeline_config, detection_context)
|
||||
|
||||
# Benchmark pipeline execution
|
||||
pipeline_times = []
|
||||
num_iterations = 50
|
||||
|
||||
for _ in range(num_iterations):
|
||||
start_time = time.perf_counter()
|
||||
result = await pipeline_executor.execute_pipeline(pipeline_config, detection_context)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
pipeline_time_ms = (end_time - start_time) * 1000
|
||||
pipeline_times.append(pipeline_time_ms)
|
||||
|
||||
# Ensure result is valid
|
||||
assert result is not None
|
||||
|
||||
avg_pipeline_time = statistics.mean(pipeline_times)
|
||||
max_pipeline_time = max(pipeline_times)
|
||||
|
||||
# Performance assertions
|
||||
assert avg_pipeline_time < performance_config["max_pipeline_time_ms"]
|
||||
|
||||
print(f"\nPipeline Execution Performance Metrics:")
|
||||
print(f"Average pipeline time: {avg_pipeline_time:.2f} ms")
|
||||
print(f"Max pipeline time: {max_pipeline_time:.2f} ms")
|
||||
|
||||
def test_memory_usage_detection(self, sample_frame, performance_config):
|
||||
"""Test memory usage during detection operations."""
|
||||
|
||||
detector = YOLODetector()
|
||||
|
||||
with patch('torch.load') as mock_torch_load:
|
||||
# Setup mock model
|
||||
mock_model = Mock()
|
||||
mock_result = Mock()
|
||||
mock_result.boxes = Mock()
|
||||
mock_result.boxes.xyxy = Mock()
|
||||
mock_result.boxes.conf = Mock()
|
||||
mock_result.boxes.cls = Mock()
|
||||
mock_result.names = {0: "car"}
|
||||
|
||||
mock_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([[100, 200, 300, 400]])
|
||||
mock_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.9])
|
||||
mock_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0])
|
||||
|
||||
mock_model.return_value = mock_result
|
||||
mock_torch_load.return_value = mock_model
|
||||
|
||||
# Measure memory usage
|
||||
gc.collect() # Clean up before measurement
|
||||
initial_memory = psutil.Process().memory_info().rss / 1024 / 1024 # MB
|
||||
|
||||
# Run detections and monitor memory
|
||||
memory_measurements = []
|
||||
for i in range(100):
|
||||
detections = detector.detect(sample_frame, confidence_threshold=0.5)
|
||||
|
||||
if i % 10 == 0: # Measure every 10 iterations
|
||||
current_memory = psutil.Process().memory_info().rss / 1024 / 1024
|
||||
memory_measurements.append(current_memory - initial_memory)
|
||||
|
||||
# Final memory measurement
|
||||
gc.collect()
|
||||
final_memory = psutil.Process().memory_info().rss / 1024 / 1024
|
||||
memory_increase = final_memory - initial_memory
|
||||
|
||||
# Memory should not grow significantly
|
||||
assert memory_increase < 100 # Less than 100MB increase
|
||||
|
||||
# Memory should be relatively stable (not constantly growing)
|
||||
if len(memory_measurements) > 1:
|
||||
memory_trend = memory_measurements[-1] - memory_measurements[0]
|
||||
assert memory_trend < 50 # Less than 50MB trend growth
|
||||
|
||||
print(f"\nMemory Usage Metrics:")
|
||||
print(f"Initial memory: {initial_memory:.1f} MB")
|
||||
print(f"Final memory: {final_memory:.1f} MB")
|
||||
print(f"Memory increase: {memory_increase:.1f} MB")
|
||||
|
||||
def test_concurrent_detection_performance(self, sample_frame):
|
||||
"""Test performance with concurrent detection operations."""
|
||||
|
||||
with patch('torch.load') as mock_torch_load:
|
||||
# Setup mock model
|
||||
mock_model = Mock()
|
||||
mock_result = Mock()
|
||||
mock_result.boxes = Mock()
|
||||
mock_result.boxes.xyxy = Mock()
|
||||
mock_result.boxes.conf = Mock()
|
||||
mock_result.boxes.cls = Mock()
|
||||
mock_result.names = {0: "car"}
|
||||
|
||||
mock_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([[100, 200, 300, 400]])
|
||||
mock_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.9])
|
||||
mock_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0])
|
||||
|
||||
mock_model.return_value = mock_result
|
||||
mock_torch_load.return_value = mock_model
|
||||
|
||||
# Create multiple detectors
|
||||
detectors = [YOLODetector() for _ in range(4)]
|
||||
|
||||
import threading
|
||||
import concurrent.futures
|
||||
|
||||
def run_detection(detector, frame, iterations=25):
|
||||
"""Run detection iterations."""
|
||||
times = []
|
||||
for _ in range(iterations):
|
||||
start_time = time.perf_counter()
|
||||
detections = detector.detect(frame, confidence_threshold=0.5)
|
||||
end_time = time.perf_counter()
|
||||
times.append((end_time - start_time) * 1000)
|
||||
return times
|
||||
|
||||
# Run concurrent detections
|
||||
start_time = time.perf_counter()
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
|
||||
futures = [
|
||||
executor.submit(run_detection, detector, sample_frame)
|
||||
for detector in detectors
|
||||
]
|
||||
|
||||
results = [future.result() for future in concurrent.futures.as_completed(futures)]
|
||||
|
||||
end_time = time.perf_counter()
|
||||
total_time = end_time - start_time
|
||||
|
||||
# Analyze results
|
||||
all_times = [time_ms for result in results for time_ms in result]
|
||||
total_detections = len(all_times)
|
||||
avg_detection_time = statistics.mean(all_times)
|
||||
|
||||
# Calculate effective throughput
|
||||
effective_fps = total_detections / total_time
|
||||
|
||||
print(f"\nConcurrent Detection Performance:")
|
||||
print(f"Total detections: {total_detections}")
|
||||
print(f"Total time: {total_time:.2f} seconds")
|
||||
print(f"Average detection time: {avg_detection_time:.2f} ms")
|
||||
print(f"Effective throughput: {effective_fps:.1f} FPS")
|
||||
|
||||
# Should maintain reasonable performance under load
|
||||
assert avg_detection_time < 200 # Less than 200ms average
|
||||
assert effective_fps > 20 # More than 20 effective FPS
|
||||
|
||||
def test_large_frame_performance(self, large_frame):
|
||||
"""Test detection performance with large frames."""
|
||||
|
||||
detector = YOLODetector()
|
||||
|
||||
with patch('torch.load') as mock_torch_load:
|
||||
# Setup mock model
|
||||
mock_model = Mock()
|
||||
mock_result = Mock()
|
||||
mock_result.boxes = Mock()
|
||||
mock_result.boxes.xyxy = Mock()
|
||||
mock_result.boxes.conf = Mock()
|
||||
mock_result.boxes.cls = Mock()
|
||||
mock_result.names = {0: "car", 1: "person"}
|
||||
|
||||
# Larger frame might have more detections
|
||||
mock_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
|
||||
[100, 200, 300, 400],
|
||||
[500, 600, 700, 800],
|
||||
[1000, 200, 1200, 400]
|
||||
])
|
||||
mock_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.9, 0.8, 0.7])
|
||||
mock_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1, 0])
|
||||
|
||||
mock_model.return_value = mock_result
|
||||
mock_torch_load.return_value = mock_model
|
||||
|
||||
# Benchmark large frame detection
|
||||
detection_times = []
|
||||
num_iterations = 20 # Fewer iterations for large frames
|
||||
|
||||
for _ in range(num_iterations):
|
||||
start_time = time.perf_counter()
|
||||
detections = detector.detect(large_frame, confidence_threshold=0.5)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
detection_time_ms = (end_time - start_time) * 1000
|
||||
detection_times.append(detection_time_ms)
|
||||
|
||||
avg_detection_time = statistics.mean(detection_times)
|
||||
max_detection_time = max(detection_times)
|
||||
|
||||
print(f"\nLarge Frame Detection Performance:")
|
||||
print(f"Frame size: {large_frame.shape}")
|
||||
print(f"Average detection time: {avg_detection_time:.2f} ms")
|
||||
print(f"Max detection time: {max_detection_time:.2f} ms")
|
||||
|
||||
# Large frames should still be processed in reasonable time
|
||||
assert avg_detection_time < 300 # Less than 300ms for large frames
|
||||
assert max_detection_time < 500 # Less than 500ms max
|
||||
|
||||
|
||||
class TestStreamPerformance:
|
||||
"""Test stream management performance."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stream_creation_performance(self):
|
||||
"""Test performance of stream creation and management."""
|
||||
|
||||
stream_manager = StreamManager()
|
||||
|
||||
with patch('cv2.VideoCapture') as mock_video_cap:
|
||||
# Setup fast mock
|
||||
mock_cap_instance = Mock()
|
||||
mock_video_cap.return_value = mock_cap_instance
|
||||
mock_cap_instance.isOpened.return_value = True
|
||||
mock_cap_instance.read.return_value = (True, np.ones((480, 640, 3), dtype=np.uint8))
|
||||
|
||||
# Benchmark stream creation
|
||||
creation_times = []
|
||||
num_streams = 20
|
||||
|
||||
try:
|
||||
for i in range(num_streams):
|
||||
from detector_worker.streams.stream_manager import StreamConfig
|
||||
config = StreamConfig(
|
||||
stream_url=f"rtsp://test{i}.example.com/stream",
|
||||
stream_type="rtsp"
|
||||
)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
await stream_manager.create_stream(f"camera_{i}", config, f"sub_{i}")
|
||||
end_time = time.perf_counter()
|
||||
|
||||
creation_time_ms = (end_time - start_time) * 1000
|
||||
creation_times.append(creation_time_ms)
|
||||
|
||||
avg_creation_time = statistics.mean(creation_times)
|
||||
max_creation_time = max(creation_times)
|
||||
|
||||
# Stream creation should be fast
|
||||
assert avg_creation_time < 100 # Less than 100ms average
|
||||
assert max_creation_time < 500 # Less than 500ms max
|
||||
|
||||
print(f"\nStream Creation Performance:")
|
||||
print(f"Streams created: {num_streams}")
|
||||
print(f"Average creation time: {avg_creation_time:.2f} ms")
|
||||
print(f"Max creation time: {max_creation_time:.2f} ms")
|
||||
|
||||
finally:
|
||||
await stream_manager.stop_all_streams()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_frame_retrieval_performance(self, sample_frame):
|
||||
"""Test performance of frame retrieval operations."""
|
||||
|
||||
stream_manager = StreamManager()
|
||||
|
||||
with patch('cv2.VideoCapture') as mock_video_cap:
|
||||
mock_cap_instance = Mock()
|
||||
mock_video_cap.return_value = mock_cap_instance
|
||||
mock_cap_instance.isOpened.return_value = True
|
||||
mock_cap_instance.read.return_value = (True, sample_frame)
|
||||
|
||||
try:
|
||||
# Create test stream
|
||||
from detector_worker.streams.stream_manager import StreamConfig
|
||||
config = StreamConfig(
|
||||
stream_url="rtsp://perf.example.com/stream",
|
||||
stream_type="rtsp"
|
||||
)
|
||||
|
||||
await stream_manager.create_stream("perf_camera", config, "perf_sub")
|
||||
|
||||
# Let stream capture some frames
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
# Benchmark frame retrieval
|
||||
retrieval_times = []
|
||||
num_retrievals = 1000
|
||||
|
||||
for _ in range(num_retrievals):
|
||||
start_time = time.perf_counter()
|
||||
frame = stream_manager.get_latest_frame("perf_camera")
|
||||
end_time = time.perf_counter()
|
||||
|
||||
retrieval_time_ms = (end_time - start_time) * 1000
|
||||
retrieval_times.append(retrieval_time_ms)
|
||||
|
||||
avg_retrieval_time = statistics.mean(retrieval_times)
|
||||
max_retrieval_time = max(retrieval_times)
|
||||
|
||||
# Frame retrieval should be very fast
|
||||
assert avg_retrieval_time < 1.0 # Less than 1ms average
|
||||
assert max_retrieval_time < 10.0 # Less than 10ms max
|
||||
|
||||
print(f"\nFrame Retrieval Performance:")
|
||||
print(f"Frame retrievals: {num_retrievals}")
|
||||
print(f"Average retrieval time: {avg_retrieval_time:.3f} ms")
|
||||
print(f"Max retrieval time: {max_retrieval_time:.3f} ms")
|
||||
|
||||
finally:
|
||||
await stream_manager.stop_all_streams()
|
||||
|
||||
|
||||
class TestModelPerformance:
|
||||
"""Test model management performance."""
|
||||
|
||||
def test_model_loading_performance(self):
|
||||
"""Test performance of model loading operations."""
|
||||
|
||||
model_manager = ModelManager()
|
||||
|
||||
with patch('torch.load') as mock_torch_load, \
|
||||
patch('os.path.exists', return_value=True):
|
||||
|
||||
# Setup mock model
|
||||
def create_mock_model():
|
||||
model = Mock()
|
||||
# Mock model parameters for memory estimation
|
||||
param = Mock()
|
||||
param.numel.return_value = 1000000 # 1M parameters
|
||||
param.element_size.return_value = 4 # 4 bytes each
|
||||
model.parameters.return_value = [param]
|
||||
return model
|
||||
|
||||
mock_torch_load.side_effect = lambda *args, **kwargs: create_mock_model()
|
||||
|
||||
# Benchmark model loading
|
||||
loading_times = []
|
||||
num_models = 10
|
||||
|
||||
for i in range(num_models):
|
||||
from detector_worker.models.model_manager import ModelConfig
|
||||
config = ModelConfig(
|
||||
model_id=f"perf_model_{i}",
|
||||
model_path=f"/fake/path/model_{i}.pt",
|
||||
model_type="detection",
|
||||
device="cpu"
|
||||
)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
model = model_manager.load_model(config)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
loading_time_ms = (end_time - start_time) * 1000
|
||||
loading_times.append(loading_time_ms)
|
||||
|
||||
avg_loading_time = statistics.mean(loading_times)
|
||||
max_loading_time = max(loading_times)
|
||||
|
||||
print(f"\nModel Loading Performance:")
|
||||
print(f"Models loaded: {num_models}")
|
||||
print(f"Average loading time: {avg_loading_time:.2f} ms")
|
||||
print(f"Max loading time: {max_loading_time:.2f} ms")
|
||||
|
||||
# Model loading should be reasonable
|
||||
assert avg_loading_time < 200 # Less than 200ms average
|
||||
|
||||
def test_model_cache_performance(self):
|
||||
"""Test performance of model cache operations."""
|
||||
|
||||
model_manager = ModelManager()
|
||||
|
||||
with patch('torch.load') as mock_torch_load, \
|
||||
patch('os.path.exists', return_value=True):
|
||||
|
||||
mock_torch_load.return_value = Mock()
|
||||
|
||||
# Load model first
|
||||
from detector_worker.models.model_manager import ModelConfig
|
||||
config = ModelConfig(
|
||||
model_id="cache_perf_model",
|
||||
model_path="/fake/path/model.pt",
|
||||
model_type="detection",
|
||||
device="cpu"
|
||||
)
|
||||
|
||||
# Initial load
|
||||
model_manager.load_model(config)
|
||||
|
||||
# Benchmark cache retrieval
|
||||
cache_times = []
|
||||
num_retrievals = 10000
|
||||
|
||||
for _ in range(num_retrievals):
|
||||
start_time = time.perf_counter()
|
||||
model = model_manager.get_model("cache_perf_model")
|
||||
end_time = time.perf_counter()
|
||||
|
||||
cache_time_ms = (end_time - start_time) * 1000
|
||||
cache_times.append(cache_time_ms)
|
||||
|
||||
avg_cache_time = statistics.mean(cache_times)
|
||||
max_cache_time = max(cache_times)
|
||||
|
||||
print(f"\nModel Cache Performance:")
|
||||
print(f"Cache retrievals: {num_retrievals}")
|
||||
print(f"Average cache time: {avg_cache_time:.4f} ms")
|
||||
print(f"Max cache time: {max_cache_time:.4f} ms")
|
||||
|
||||
# Cache should be very fast
|
||||
assert avg_cache_time < 0.1 # Less than 0.1ms average
|
||||
assert max_cache_time < 1.0 # Less than 1ms max
|
828
tests/performance/test_storage_performance.py
Normal file
828
tests/performance/test_storage_performance.py
Normal file
|
@ -0,0 +1,828 @@
|
|||
"""
|
||||
Performance tests for storage components (database, Redis, session cache).
|
||||
|
||||
These tests benchmark storage operations to ensure they meet
|
||||
performance requirements for high-throughput scenarios.
|
||||
"""
|
||||
import pytest
|
||||
import asyncio
|
||||
import time
|
||||
import statistics
|
||||
import uuid
|
||||
from unittest.mock import Mock, patch, MagicMock
|
||||
import psutil
|
||||
import gc
|
||||
import numpy as np
|
||||
|
||||
from detector_worker.storage.database_manager import DatabaseManager
|
||||
from detector_worker.storage.redis_client import RedisClient, RedisConfig
|
||||
from detector_worker.storage.session_cache import SessionCacheManager, SessionCache, CacheConfig
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def performance_config():
|
||||
"""Configuration for performance tests."""
|
||||
return {
|
||||
"max_db_query_time_ms": 50,
|
||||
"max_redis_operation_time_ms": 10,
|
||||
"max_cache_operation_time_ms": 1,
|
||||
"min_db_throughput_ops_per_sec": 1000,
|
||||
"min_redis_throughput_ops_per_sec": 5000,
|
||||
"min_cache_throughput_ops_per_sec": 10000
|
||||
}
|
||||
|
||||
|
||||
class TestDatabasePerformance:
|
||||
"""Test database performance benchmarks."""
|
||||
|
||||
def test_database_connection_performance(self, performance_config):
|
||||
"""Test database connection establishment performance."""
|
||||
|
||||
with patch('psycopg2.connect') as mock_connect:
|
||||
# Setup mock connection
|
||||
mock_conn = Mock()
|
||||
mock_cursor = Mock()
|
||||
mock_conn.cursor.return_value = mock_cursor
|
||||
mock_connect.return_value = mock_conn
|
||||
|
||||
db_manager = DatabaseManager()
|
||||
|
||||
# Benchmark connection times
|
||||
connection_times = []
|
||||
num_connections = 100
|
||||
|
||||
for _ in range(num_connections):
|
||||
start_time = time.perf_counter()
|
||||
db_manager.connect()
|
||||
end_time = time.perf_counter()
|
||||
|
||||
connection_time_ms = (end_time - start_time) * 1000
|
||||
connection_times.append(connection_time_ms)
|
||||
|
||||
# Disconnect for next test
|
||||
db_manager.disconnect()
|
||||
|
||||
avg_connection_time = statistics.mean(connection_times)
|
||||
max_connection_time = max(connection_times)
|
||||
|
||||
print(f"\nDatabase Connection Performance:")
|
||||
print(f"Connections: {num_connections}")
|
||||
print(f"Average connection time: {avg_connection_time:.2f} ms")
|
||||
print(f"Max connection time: {max_connection_time:.2f} ms")
|
||||
|
||||
# Connection should be fast
|
||||
assert avg_connection_time < 10.0 # Less than 10ms average
|
||||
assert max_connection_time < 50.0 # Less than 50ms max
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_database_insert_performance(self, performance_config):
|
||||
"""Test database insert performance."""
|
||||
|
||||
with patch('psycopg2.connect') as mock_connect:
|
||||
# Setup mock database
|
||||
mock_conn = Mock()
|
||||
mock_cursor = Mock()
|
||||
mock_conn.cursor.return_value = mock_cursor
|
||||
mock_connect.return_value = mock_conn
|
||||
|
||||
db_manager = DatabaseManager()
|
||||
db_manager.connect()
|
||||
|
||||
# Prepare test data
|
||||
table_name = "car_frontal_info"
|
||||
test_records = [
|
||||
{
|
||||
"display_id": f"display_{i}",
|
||||
"captured_timestamp": str(int(time.time() * 1000) + i),
|
||||
"session_id": str(uuid.uuid4()),
|
||||
"license_character": None,
|
||||
"license_type": "No model available"
|
||||
}
|
||||
for i in range(1000)
|
||||
]
|
||||
|
||||
# Benchmark single inserts
|
||||
insert_times = []
|
||||
|
||||
for record in test_records[:100]: # Test first 100 for individual timing
|
||||
start_time = time.perf_counter()
|
||||
await db_manager.create_record(table_name, record)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
insert_time_ms = (end_time - start_time) * 1000
|
||||
insert_times.append(insert_time_ms)
|
||||
|
||||
# Benchmark batch insert
|
||||
start_time = time.perf_counter()
|
||||
for record in test_records[100:]:
|
||||
await db_manager.create_record(table_name, record)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
batch_time = end_time - start_time
|
||||
batch_throughput = 900 / batch_time # 900 records in batch
|
||||
|
||||
avg_insert_time = statistics.mean(insert_times)
|
||||
max_insert_time = max(insert_times)
|
||||
|
||||
print(f"\nDatabase Insert Performance:")
|
||||
print(f"Average insert time: {avg_insert_time:.2f} ms")
|
||||
print(f"Max insert time: {max_insert_time:.2f} ms")
|
||||
print(f"Batch throughput: {batch_throughput:.0f} inserts/second")
|
||||
|
||||
assert avg_insert_time < performance_config["max_db_query_time_ms"]
|
||||
assert batch_throughput > performance_config["min_db_throughput_ops_per_sec"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_database_update_performance(self, performance_config):
|
||||
"""Test database update performance."""
|
||||
|
||||
with patch('psycopg2.connect') as mock_connect:
|
||||
# Setup mock database
|
||||
mock_conn = Mock()
|
||||
mock_cursor = Mock()
|
||||
mock_conn.cursor.return_value = mock_cursor
|
||||
mock_connect.return_value = mock_conn
|
||||
|
||||
db_manager = DatabaseManager()
|
||||
db_manager.connect()
|
||||
|
||||
table_name = "car_frontal_info"
|
||||
session_ids = [str(uuid.uuid4()) for _ in range(1000)]
|
||||
|
||||
# Benchmark updates
|
||||
update_times = []
|
||||
|
||||
for session_id in session_ids[:100]: # Test first 100 for individual timing
|
||||
update_data = {
|
||||
"car_brand": "Toyota",
|
||||
"car_body_type": "Sedan",
|
||||
"updated_at": "NOW()"
|
||||
}
|
||||
|
||||
start_time = time.perf_counter()
|
||||
await db_manager.update_record(table_name, session_id, update_data, key_field="session_id")
|
||||
end_time = time.perf_counter()
|
||||
|
||||
update_time_ms = (end_time - start_time) * 1000
|
||||
update_times.append(update_time_ms)
|
||||
|
||||
# Benchmark batch updates
|
||||
start_time = time.perf_counter()
|
||||
for session_id in session_ids[100:]:
|
||||
update_data = {
|
||||
"car_brand": "Honda",
|
||||
"car_body_type": "Hatchback"
|
||||
}
|
||||
await db_manager.update_record(table_name, session_id, update_data, key_field="session_id")
|
||||
end_time = time.perf_counter()
|
||||
|
||||
batch_time = end_time - start_time
|
||||
batch_throughput = 900 / batch_time
|
||||
|
||||
avg_update_time = statistics.mean(update_times)
|
||||
max_update_time = max(update_times)
|
||||
|
||||
print(f"\nDatabase Update Performance:")
|
||||
print(f"Average update time: {avg_update_time:.2f} ms")
|
||||
print(f"Max update time: {max_update_time:.2f} ms")
|
||||
print(f"Batch throughput: {batch_throughput:.0f} updates/second")
|
||||
|
||||
assert avg_update_time < performance_config["max_db_query_time_ms"]
|
||||
assert batch_throughput > performance_config["min_db_throughput_ops_per_sec"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_database_query_performance(self, performance_config):
|
||||
"""Test database query performance."""
|
||||
|
||||
with patch('psycopg2.connect') as mock_connect:
|
||||
# Setup mock database
|
||||
mock_conn = Mock()
|
||||
mock_cursor = Mock()
|
||||
mock_conn.cursor.return_value = mock_cursor
|
||||
|
||||
# Mock query results
|
||||
mock_cursor.fetchone.return_value = ("display_1", "1640995200", "session_123", None, "No model", "Toyota", "Sedan")
|
||||
mock_cursor.fetchall.return_value = [
|
||||
("display_1", "1640995200", "session_123", None, "No model", "Toyota", "Sedan"),
|
||||
("display_2", "1640995201", "session_124", None, "No model", "Honda", "Hatchback")
|
||||
]
|
||||
|
||||
mock_connect.return_value = mock_conn
|
||||
|
||||
db_manager = DatabaseManager()
|
||||
db_manager.connect()
|
||||
|
||||
table_name = "car_frontal_info"
|
||||
|
||||
# Benchmark single record queries
|
||||
query_times = []
|
||||
num_queries = 1000
|
||||
|
||||
for i in range(num_queries):
|
||||
session_id = f"session_{i}"
|
||||
|
||||
start_time = time.perf_counter()
|
||||
result = await db_manager.get_record(table_name, session_id, key_field="session_id")
|
||||
end_time = time.perf_counter()
|
||||
|
||||
query_time_ms = (end_time - start_time) * 1000
|
||||
query_times.append(query_time_ms)
|
||||
|
||||
avg_query_time = statistics.mean(query_times)
|
||||
max_query_time = max(query_times)
|
||||
query_throughput = num_queries / (sum(query_times) / 1000)
|
||||
|
||||
print(f"\nDatabase Query Performance:")
|
||||
print(f"Queries: {num_queries}")
|
||||
print(f"Average query time: {avg_query_time:.2f} ms")
|
||||
print(f"Max query time: {max_query_time:.2f} ms")
|
||||
print(f"Query throughput: {query_throughput:.0f} queries/second")
|
||||
|
||||
assert avg_query_time < performance_config["max_db_query_time_ms"]
|
||||
assert query_throughput > performance_config["min_db_throughput_ops_per_sec"]
|
||||
|
||||
|
||||
class TestRedisPerformance:
|
||||
"""Test Redis client performance benchmarks."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_redis_connection_performance(self):
|
||||
"""Test Redis connection performance."""
|
||||
|
||||
with patch('redis.Redis') as mock_redis_class, \
|
||||
patch('redis.ConnectionPool') as mock_pool_class:
|
||||
|
||||
mock_redis = Mock()
|
||||
mock_redis.ping.return_value = True
|
||||
mock_redis_class.return_value = mock_redis
|
||||
|
||||
mock_pool = Mock()
|
||||
mock_pool_class.return_value = mock_pool
|
||||
|
||||
config = RedisConfig(host="localhost", port=6379)
|
||||
|
||||
# Benchmark connection times
|
||||
connection_times = []
|
||||
num_connections = 100
|
||||
|
||||
for _ in range(num_connections):
|
||||
redis_client = RedisClient(config)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
await redis_client.connect()
|
||||
end_time = time.perf_counter()
|
||||
|
||||
connection_time_ms = (end_time - start_time) * 1000
|
||||
connection_times.append(connection_time_ms)
|
||||
|
||||
await redis_client.disconnect()
|
||||
|
||||
avg_connection_time = statistics.mean(connection_times)
|
||||
max_connection_time = max(connection_times)
|
||||
|
||||
print(f"\nRedis Connection Performance:")
|
||||
print(f"Connections: {num_connections}")
|
||||
print(f"Average connection time: {avg_connection_time:.2f} ms")
|
||||
print(f"Max connection time: {max_connection_time:.2f} ms")
|
||||
|
||||
# Redis connections should be very fast
|
||||
assert avg_connection_time < 5.0 # Less than 5ms average
|
||||
assert max_connection_time < 20.0 # Less than 20ms max
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_redis_basic_operations_performance(self, performance_config):
|
||||
"""Test basic Redis operations performance."""
|
||||
|
||||
with patch('redis.Redis') as mock_redis_class:
|
||||
mock_redis = Mock()
|
||||
mock_redis.ping.return_value = True
|
||||
mock_redis.set.return_value = True
|
||||
mock_redis.get.return_value = "test_value"
|
||||
mock_redis.delete.return_value = 1
|
||||
mock_redis.exists.return_value = 1
|
||||
mock_redis_class.return_value = mock_redis
|
||||
|
||||
config = RedisConfig(host="localhost")
|
||||
redis_client = RedisClient(config)
|
||||
await redis_client.connect()
|
||||
|
||||
# Benchmark SET operations
|
||||
set_times = []
|
||||
num_operations = 10000
|
||||
|
||||
for i in range(num_operations):
|
||||
start_time = time.perf_counter()
|
||||
await redis_client.set(f"key_{i}", f"value_{i}", expire_seconds=300)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
set_time_ms = (end_time - start_time) * 1000
|
||||
set_times.append(set_time_ms)
|
||||
|
||||
# Benchmark GET operations
|
||||
get_times = []
|
||||
for i in range(num_operations):
|
||||
start_time = time.perf_counter()
|
||||
value = await redis_client.get(f"key_{i}")
|
||||
end_time = time.perf_counter()
|
||||
|
||||
get_time_ms = (end_time - start_time) * 1000
|
||||
get_times.append(get_time_ms)
|
||||
|
||||
# Benchmark DELETE operations
|
||||
delete_times = []
|
||||
for i in range(num_operations):
|
||||
start_time = time.perf_counter()
|
||||
result = await redis_client.delete(f"key_{i}")
|
||||
end_time = time.perf_counter()
|
||||
|
||||
delete_time_ms = (end_time - start_time) * 1000
|
||||
delete_times.append(delete_time_ms)
|
||||
|
||||
# Calculate statistics
|
||||
avg_set_time = statistics.mean(set_times)
|
||||
avg_get_time = statistics.mean(get_times)
|
||||
avg_delete_time = statistics.mean(delete_times)
|
||||
|
||||
set_throughput = num_operations / (sum(set_times) / 1000)
|
||||
get_throughput = num_operations / (sum(get_times) / 1000)
|
||||
delete_throughput = num_operations / (sum(delete_times) / 1000)
|
||||
|
||||
print(f"\nRedis Basic Operations Performance:")
|
||||
print(f"Operations per type: {num_operations}")
|
||||
print(f"Average SET time: {avg_set_time:.3f} ms")
|
||||
print(f"Average GET time: {avg_get_time:.3f} ms")
|
||||
print(f"Average DELETE time: {avg_delete_time:.3f} ms")
|
||||
print(f"SET throughput: {set_throughput:.0f} ops/second")
|
||||
print(f"GET throughput: {get_throughput:.0f} ops/second")
|
||||
print(f"DELETE throughput: {delete_throughput:.0f} ops/second")
|
||||
|
||||
assert avg_set_time < performance_config["max_redis_operation_time_ms"]
|
||||
assert avg_get_time < performance_config["max_redis_operation_time_ms"]
|
||||
assert avg_delete_time < performance_config["max_redis_operation_time_ms"]
|
||||
|
||||
assert set_throughput > performance_config["min_redis_throughput_ops_per_sec"]
|
||||
assert get_throughput > performance_config["min_redis_throughput_ops_per_sec"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_redis_image_storage_performance(self):
|
||||
"""Test Redis image storage performance."""
|
||||
|
||||
with patch('redis.Redis') as mock_redis_class, \
|
||||
patch('cv2.imencode') as mock_imencode:
|
||||
|
||||
mock_redis = Mock()
|
||||
mock_redis.ping.return_value = True
|
||||
mock_redis.set.return_value = True
|
||||
mock_redis.expire.return_value = True
|
||||
mock_redis_class.return_value = mock_redis
|
||||
|
||||
# Mock image encoding
|
||||
encoded_data = np.array([1, 2, 3, 4, 5], dtype=np.uint8)
|
||||
mock_imencode.return_value = (True, encoded_data)
|
||||
|
||||
config = RedisConfig(host="localhost")
|
||||
redis_client = RedisClient(config)
|
||||
await redis_client.connect()
|
||||
|
||||
# Create test frames
|
||||
small_frame = np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8)
|
||||
medium_frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
|
||||
large_frame = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)
|
||||
|
||||
frames = [
|
||||
("small", small_frame),
|
||||
("medium", medium_frame),
|
||||
("large", large_frame)
|
||||
]
|
||||
|
||||
for frame_type, frame in frames:
|
||||
storage_times = []
|
||||
num_images = 100
|
||||
|
||||
for i in range(num_images):
|
||||
key = f"test_image_{frame_type}_{i}"
|
||||
|
||||
start_time = time.perf_counter()
|
||||
await redis_client.image_storage.store_image(key, frame, expire_seconds=300)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
storage_time_ms = (end_time - start_time) * 1000
|
||||
storage_times.append(storage_time_ms)
|
||||
|
||||
avg_storage_time = statistics.mean(storage_times)
|
||||
max_storage_time = max(storage_times)
|
||||
throughput = num_images / (sum(storage_times) / 1000)
|
||||
|
||||
print(f"\n{frame_type.capitalize()} Frame Storage Performance:")
|
||||
print(f"Frame size: {frame.shape}")
|
||||
print(f"Images stored: {num_images}")
|
||||
print(f"Average storage time: {avg_storage_time:.2f} ms")
|
||||
print(f"Max storage time: {max_storage_time:.2f} ms")
|
||||
print(f"Storage throughput: {throughput:.1f} images/second")
|
||||
|
||||
# Performance should scale reasonably with image size
|
||||
expected_max_time = {"small": 50, "medium": 100, "large": 200}
|
||||
assert avg_storage_time < expected_max_time[frame_type]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_redis_pipeline_performance(self):
|
||||
"""Test Redis pipeline performance."""
|
||||
|
||||
with patch('redis.Redis') as mock_redis_class:
|
||||
mock_redis = Mock()
|
||||
mock_redis.ping.return_value = True
|
||||
mock_redis_class.return_value = mock_redis
|
||||
|
||||
# Mock pipeline
|
||||
mock_pipeline = Mock()
|
||||
mock_pipeline.execute.return_value = [True] * 1000
|
||||
mock_redis.pipeline.return_value = mock_pipeline
|
||||
|
||||
config = RedisConfig(host="localhost")
|
||||
redis_client = RedisClient(config)
|
||||
await redis_client.connect()
|
||||
|
||||
# Benchmark pipeline operations
|
||||
num_operations = 1000
|
||||
|
||||
start_time = time.perf_counter()
|
||||
|
||||
async with redis_client.pipeline() as pipe:
|
||||
for i in range(num_operations):
|
||||
pipe.set(f"pipeline_key_{i}", f"pipeline_value_{i}")
|
||||
results = await pipe.execute()
|
||||
|
||||
end_time = time.perf_counter()
|
||||
|
||||
total_time = end_time - start_time
|
||||
throughput = num_operations / total_time
|
||||
|
||||
print(f"\nRedis Pipeline Performance:")
|
||||
print(f"Operations: {num_operations}")
|
||||
print(f"Total time: {total_time:.3f} seconds")
|
||||
print(f"Throughput: {throughput:.0f} ops/second")
|
||||
|
||||
# Pipeline should be much faster than individual operations
|
||||
assert throughput > 10000 # Should exceed 10k ops/second with pipeline
|
||||
assert len(results) == num_operations
|
||||
|
||||
|
||||
class TestSessionCachePerformance:
|
||||
"""Test session cache performance benchmarks."""
|
||||
|
||||
def test_cache_basic_operations_performance(self, performance_config):
|
||||
"""Test basic cache operations performance."""
|
||||
|
||||
cache_config = CacheConfig(max_size=10000, ttl_seconds=3600)
|
||||
cache = SessionCache(cache_config)
|
||||
|
||||
# Prepare test data
|
||||
test_sessions = []
|
||||
for i in range(10000):
|
||||
from detector_worker.storage.session_cache import SessionData
|
||||
session_data = SessionData(
|
||||
session_id=f"session_{i}",
|
||||
camera_id=f"camera_{i % 100}", # 100 unique cameras
|
||||
display_id=f"display_{i % 50}" # 50 unique displays
|
||||
)
|
||||
session_data.add_detection_data("main", {"class": "car", "confidence": 0.9})
|
||||
test_sessions.append((f"session_{i}", session_data))
|
||||
|
||||
# Benchmark PUT operations
|
||||
put_times = []
|
||||
for session_id, session_data in test_sessions:
|
||||
start_time = time.perf_counter()
|
||||
cache.put(session_id, session_data)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
put_time_ms = (end_time - start_time) * 1000
|
||||
put_times.append(put_time_ms)
|
||||
|
||||
# Benchmark GET operations
|
||||
get_times = []
|
||||
for session_id, _ in test_sessions:
|
||||
start_time = time.perf_counter()
|
||||
retrieved_data = cache.get(session_id)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
get_time_ms = (end_time - start_time) * 1000
|
||||
get_times.append(get_time_ms)
|
||||
|
||||
# Calculate statistics
|
||||
avg_put_time = statistics.mean(put_times)
|
||||
avg_get_time = statistics.mean(get_times)
|
||||
max_put_time = max(put_times)
|
||||
max_get_time = max(get_times)
|
||||
|
||||
put_throughput = len(test_sessions) / (sum(put_times) / 1000)
|
||||
get_throughput = len(test_sessions) / (sum(get_times) / 1000)
|
||||
|
||||
print(f"\nSession Cache Basic Operations Performance:")
|
||||
print(f"Operations per type: {len(test_sessions)}")
|
||||
print(f"Average PUT time: {avg_put_time:.3f} ms")
|
||||
print(f"Average GET time: {avg_get_time:.3f} ms")
|
||||
print(f"Max PUT time: {max_put_time:.3f} ms")
|
||||
print(f"Max GET time: {max_get_time:.3f} ms")
|
||||
print(f"PUT throughput: {put_throughput:.0f} ops/second")
|
||||
print(f"GET throughput: {get_throughput:.0f} ops/second")
|
||||
|
||||
assert avg_put_time < performance_config["max_cache_operation_time_ms"]
|
||||
assert avg_get_time < performance_config["max_cache_operation_time_ms"]
|
||||
assert put_throughput > performance_config["min_cache_throughput_ops_per_sec"]
|
||||
assert get_throughput > performance_config["min_cache_throughput_ops_per_sec"]
|
||||
|
||||
def test_cache_manager_performance(self, performance_config):
|
||||
"""Test session cache manager performance."""
|
||||
|
||||
cache_manager = SessionCacheManager()
|
||||
cache_manager.clear_all()
|
||||
|
||||
# Benchmark detection caching
|
||||
detection_times = []
|
||||
num_operations = 5000
|
||||
|
||||
for i in range(num_operations):
|
||||
camera_id = f"camera_{i % 50}"
|
||||
detection_data = {
|
||||
"class": "car",
|
||||
"confidence": 0.9,
|
||||
"bbox": [100, 200, 300, 400],
|
||||
"track_id": i
|
||||
}
|
||||
|
||||
start_time = time.perf_counter()
|
||||
cache_manager.cache_detection(camera_id, detection_data)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
detection_time_ms = (end_time - start_time) * 1000
|
||||
detection_times.append(detection_time_ms)
|
||||
|
||||
# Benchmark detection retrieval
|
||||
retrieval_times = []
|
||||
for i in range(num_operations):
|
||||
camera_id = f"camera_{i % 50}"
|
||||
|
||||
start_time = time.perf_counter()
|
||||
cached_detection = cache_manager.get_cached_detection(camera_id)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
retrieval_time_ms = (end_time - start_time) * 1000
|
||||
retrieval_times.append(retrieval_time_ms)
|
||||
|
||||
# Benchmark session operations
|
||||
session_times = []
|
||||
for i in range(1000): # Fewer session operations as they're more complex
|
||||
session_id = str(uuid.uuid4())
|
||||
camera_id = f"camera_{i % 20}"
|
||||
|
||||
start_time = time.perf_counter()
|
||||
cache_manager.create_session(session_id, camera_id, {"initial": "data"})
|
||||
cache_manager.update_session_detection(session_id, {"car_brand": "Toyota"})
|
||||
session_data = cache_manager.get_session_detection(session_id)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
session_time_ms = (end_time - start_time) * 1000
|
||||
session_times.append(session_time_ms)
|
||||
|
||||
# Calculate statistics
|
||||
avg_detection_time = statistics.mean(detection_times)
|
||||
avg_retrieval_time = statistics.mean(retrieval_times)
|
||||
avg_session_time = statistics.mean(session_times)
|
||||
|
||||
detection_throughput = num_operations / (sum(detection_times) / 1000)
|
||||
retrieval_throughput = num_operations / (sum(retrieval_times) / 1000)
|
||||
session_throughput = 1000 / (sum(session_times) / 1000)
|
||||
|
||||
print(f"\nCache Manager Performance:")
|
||||
print(f"Average detection cache time: {avg_detection_time:.3f} ms")
|
||||
print(f"Average retrieval time: {avg_retrieval_time:.3f} ms")
|
||||
print(f"Average session operation time: {avg_session_time:.3f} ms")
|
||||
print(f"Detection throughput: {detection_throughput:.0f} ops/second")
|
||||
print(f"Retrieval throughput: {retrieval_throughput:.0f} ops/second")
|
||||
print(f"Session throughput: {session_throughput:.0f} ops/second")
|
||||
|
||||
assert avg_detection_time < performance_config["max_cache_operation_time_ms"] * 2
|
||||
assert avg_retrieval_time < performance_config["max_cache_operation_time_ms"]
|
||||
assert detection_throughput > performance_config["min_cache_throughput_ops_per_sec"] / 2
|
||||
|
||||
def test_cache_memory_performance(self):
|
||||
"""Test cache memory usage and performance."""
|
||||
|
||||
# Measure initial memory
|
||||
gc.collect()
|
||||
initial_memory = psutil.Process().memory_info().rss / 1024 / 1024 # MB
|
||||
|
||||
cache_config = CacheConfig(max_size=10000, ttl_seconds=3600)
|
||||
cache = SessionCache(cache_config)
|
||||
|
||||
# Add many sessions to test memory usage
|
||||
num_sessions = 5000
|
||||
memory_measurements = []
|
||||
|
||||
for i in range(num_sessions):
|
||||
from detector_worker.storage.session_cache import SessionData
|
||||
session_data = SessionData(
|
||||
session_id=f"memory_session_{i}",
|
||||
camera_id=f"camera_{i % 100}",
|
||||
display_id=f"display_{i % 50}"
|
||||
)
|
||||
|
||||
# Add some detection data
|
||||
session_data.add_detection_data("detection", {
|
||||
"class": "car",
|
||||
"confidence": 0.9,
|
||||
"bbox": [100, 200, 300, 400],
|
||||
"features": [float(j) for j in range(50)] # Add some bulk
|
||||
})
|
||||
|
||||
cache.put(f"memory_session_{i}", session_data)
|
||||
|
||||
# Measure memory periodically
|
||||
if i % 500 == 0 and i > 0:
|
||||
current_memory = psutil.Process().memory_info().rss / 1024 / 1024
|
||||
memory_increase = current_memory - initial_memory
|
||||
memory_measurements.append((i, memory_increase))
|
||||
|
||||
# Final memory measurement
|
||||
gc.collect()
|
||||
final_memory = psutil.Process().memory_info().rss / 1024 / 1024
|
||||
total_memory_increase = final_memory - initial_memory
|
||||
|
||||
# Calculate memory per session
|
||||
memory_per_session = total_memory_increase / num_sessions
|
||||
|
||||
print(f"\nCache Memory Performance:")
|
||||
print(f"Sessions cached: {num_sessions}")
|
||||
print(f"Initial memory: {initial_memory:.1f} MB")
|
||||
print(f"Final memory: {final_memory:.1f} MB")
|
||||
print(f"Total memory increase: {total_memory_increase:.1f} MB")
|
||||
print(f"Memory per session: {memory_per_session * 1024:.1f} KB")
|
||||
|
||||
# Memory usage should be reasonable
|
||||
assert memory_per_session < 0.1 # Less than 100KB per session
|
||||
assert total_memory_increase < 500 # Total increase less than 500MB
|
||||
|
||||
# Test access performance with full cache
|
||||
access_times = []
|
||||
for i in range(1000):
|
||||
session_id = f"memory_session_{i}"
|
||||
|
||||
start_time = time.perf_counter()
|
||||
session_data = cache.get(session_id)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
access_time_ms = (end_time - start_time) * 1000
|
||||
access_times.append(access_time_ms)
|
||||
|
||||
avg_access_time = statistics.mean(access_times)
|
||||
max_access_time = max(access_times)
|
||||
|
||||
print(f"Full cache access performance:")
|
||||
print(f"Average access time: {avg_access_time:.3f} ms")
|
||||
print(f"Max access time: {max_access_time:.3f} ms")
|
||||
|
||||
# Access should remain fast even with full cache
|
||||
assert avg_access_time < 1.0 # Less than 1ms average
|
||||
assert max_access_time < 10.0 # Less than 10ms max
|
||||
|
||||
def test_cache_eviction_performance(self):
|
||||
"""Test cache eviction performance."""
|
||||
|
||||
# Create cache with small size to force evictions
|
||||
cache_config = CacheConfig(max_size=1000, eviction_policy="lru")
|
||||
cache = SessionCache(cache_config)
|
||||
|
||||
# Fill cache beyond capacity
|
||||
num_sessions = 2000 # Double the capacity
|
||||
eviction_times = []
|
||||
|
||||
for i in range(num_sessions):
|
||||
from detector_worker.storage.session_cache import SessionData
|
||||
session_data = SessionData(
|
||||
session_id=f"eviction_session_{i}",
|
||||
camera_id=f"camera_{i % 100}",
|
||||
display_id=f"display_{i % 50}"
|
||||
)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
cache.put(f"eviction_session_{i}", session_data)
|
||||
end_time = time.perf_counter()
|
||||
|
||||
operation_time_ms = (end_time - start_time) * 1000
|
||||
eviction_times.append(operation_time_ms)
|
||||
|
||||
# Analyze eviction performance
|
||||
avg_operation_time = statistics.mean(eviction_times)
|
||||
max_operation_time = max(eviction_times)
|
||||
|
||||
# Check that cache size is maintained
|
||||
assert cache.size() == 1000 # Should not exceed max_size
|
||||
|
||||
print(f"\nCache Eviction Performance:")
|
||||
print(f"Sessions processed: {num_sessions}")
|
||||
print(f"Final cache size: {cache.size()}")
|
||||
print(f"Average operation time: {avg_operation_time:.3f} ms")
|
||||
print(f"Max operation time: {max_operation_time:.3f} ms")
|
||||
|
||||
# Eviction should not significantly slow down operations
|
||||
assert avg_operation_time < 5.0 # Less than 5ms average with eviction
|
||||
assert max_operation_time < 20.0 # Less than 20ms max
|
||||
|
||||
|
||||
class TestStorageIntegrationPerformance:
    """Test integrated storage performance scenarios."""

    @pytest.mark.asyncio
    async def test_full_storage_pipeline_performance(self):
        """Test performance of the complete storage pipeline."""

        with patch('psycopg2.connect') as mock_db_connect, \
             patch('redis.Redis') as mock_redis_class:

            # Setup mocks
            mock_db_conn = Mock()
            mock_db_cursor = Mock()
            mock_db_conn.cursor.return_value = mock_db_cursor
            mock_db_connect.return_value = mock_db_conn

            mock_redis = Mock()
            mock_redis.ping.return_value = True
            mock_redis.set.return_value = True
            mock_redis.expire.return_value = True
            mock_redis_class.return_value = mock_redis

            # Initialize storage components
            db_manager = DatabaseManager()
            db_manager.connect()

            redis_config = RedisConfig(host="localhost")
            redis_client = RedisClient(redis_config)
            await redis_client.connect()

            cache_manager = SessionCacheManager()
            cache_manager.clear_all()

            # Benchmark the complete storage pipeline
            pipeline_times = []
            num_iterations = 500

            for i in range(num_iterations):
                session_id = str(uuid.uuid4())
                camera_id = f"camera_{i % 20}"

                start_time = time.perf_counter()

                # 1. Cache detection
                detection_data = {
                    "class": "car",
                    "confidence": 0.9,
                    "bbox": [100, 200, 300, 400],
                    "track_id": i + 1000
                }
                cache_manager.cache_detection(camera_id, detection_data)

                # 2. Create session
                cache_manager.create_session(session_id, camera_id, {"initial": "data"})

                # 3. Database insert
                await db_manager.create_record("car_frontal_info", {
                    "session_id": session_id,
                    "display_id": f"display_{i % 10}",
                    "captured_timestamp": str(int(time.time() * 1000)),
                    "license_type": "No model available"
                })

                # 4. Redis store
                await redis_client.set(f"detection:{session_id}", "image_data", expire_seconds=600)

                # 5. Update session with results
                cache_manager.update_session_detection(session_id, {
                    "car_brand": "Toyota",
                    "car_body_type": "Sedan"
                })

                # 6. Database update
                await db_manager.update_record("car_frontal_info", session_id, {
                    "car_brand": "Toyota",
                    "car_body_type": "Sedan"
                }, key_field="session_id")

                end_time = time.perf_counter()

                pipeline_time_ms = (end_time - start_time) * 1000
                pipeline_times.append(pipeline_time_ms)

            # Analyze pipeline performance
            avg_pipeline_time = statistics.mean(pipeline_times)
            max_pipeline_time = max(pipeline_times)
            pipeline_throughput = num_iterations / (sum(pipeline_times) / 1000)

            print(f"\nFull Storage Pipeline Performance:")
            print(f"Pipeline iterations: {num_iterations}")
            print(f"Average pipeline time: {avg_pipeline_time:.2f} ms")
            print(f"Max pipeline time: {max_pipeline_time:.2f} ms")
            print(f"Pipeline throughput: {pipeline_throughput:.1f} pipelines/second")

            # The complete pipeline should stay efficient
            assert avg_pipeline_time < 100   # Less than 100 ms per complete pipeline
            assert pipeline_throughput > 50  # At least 50 pipelines/second
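

# Illustrative sketch, not part of the original suite: the pipeline benchmark above reports
# only mean and max, which can hide tail latency. A small helper like this, using only the
# `statistics` module already used in this file, would also surface p95/p99 — for example,
# print(summarize_latencies_ms(pipeline_times)) right after the existing analysis block.
def summarize_latencies_ms(samples):
    """Return mean, p95, and p99 (in milliseconds) for a list of latency samples."""
    cut_points = statistics.quantiles(samples, n=100)  # 99 cut points for percentiles 1..99
    return {
        "mean_ms": statistics.mean(samples),
        "p95_ms": cut_points[94],
        "p99_ms": cut_points[98],
    }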
596
tests/performance/test_websocket_performance.py
Normal file
@ -0,0 +1,596 @@
"""
|
||||
Performance tests for WebSocket communication and message processing.
|
||||
|
||||
These tests benchmark WebSocket throughput, latency, and concurrent
|
||||
connection handling to ensure scalability requirements are met.
|
||||
"""
|
||||
import pytest
|
||||
import asyncio
|
||||
import time
|
||||
import statistics
|
||||
import json
|
||||
from unittest.mock import Mock, AsyncMock
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
import psutil
|
||||
|
||||
from detector_worker.communication.websocket_handler import WebSocketHandler
|
||||
from detector_worker.communication.message_processor import MessageProcessor
|
||||
from detector_worker.communication.websocket_handler import ConnectionManager
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def performance_config():
|
||||
"""Configuration for performance tests."""
|
||||
return {
|
||||
"max_message_latency_ms": 10,
|
||||
"min_throughput_msgs_per_sec": 1000,
|
||||
"max_concurrent_connections": 100,
|
||||
"max_memory_per_connection_kb": 100
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_websocket():
|
||||
"""Create mock WebSocket for performance testing."""
|
||||
websocket = Mock()
|
||||
websocket.accept = AsyncMock()
|
||||
websocket.send_json = AsyncMock()
|
||||
websocket.send_text = AsyncMock()
|
||||
websocket.receive_json = AsyncMock()
|
||||
websocket.receive_text = AsyncMock()
|
||||
websocket.close = AsyncMock()
|
||||
websocket.ping = AsyncMock()
|
||||
return websocket
|
||||
|
||||
|
||||
class TestWebSocketMessagePerformance:
    """Test WebSocket message processing performance."""

    @pytest.mark.asyncio
    async def test_message_processing_throughput(self, performance_config):
        """Test message processing throughput."""

        message_processor = MessageProcessor()

        # Simple state request message
        test_message = {"type": "requestState"}
        client_id = "perf_client"

        # Warm up
        for _ in range(10):
            await message_processor.process_message(test_message, client_id)

        # Benchmark throughput
        num_messages = 10000
        start_time = time.perf_counter()

        for _ in range(num_messages):
            await message_processor.process_message(test_message, client_id)

        end_time = time.perf_counter()
        total_time = end_time - start_time
        throughput = num_messages / total_time

        print(f"\nMessage Processing Throughput:")
        print(f"Messages processed: {num_messages}")
        print(f"Total time: {total_time:.2f} seconds")
        print(f"Throughput: {throughput:.0f} messages/second")

        assert throughput >= performance_config["min_throughput_msgs_per_sec"]

    @pytest.mark.asyncio
    async def test_message_processing_latency(self, performance_config):
        """Test individual message processing latency."""

        message_processor = MessageProcessor()

        test_messages = [
            {"type": "requestState"},
            {"type": "setSessionId", "payload": {"sessionId": "test", "displayId": "display"}},
            {"type": "patchSession", "payload": {"sessionId": "test", "data": {"test": "value"}}}
        ]

        client_id = "latency_client"

        # Benchmark per-message latency for each message type
        all_latencies = []

        for test_message in test_messages:
            latencies = []

            for _ in range(1000):
                start_time = time.perf_counter()
                await message_processor.process_message(test_message, client_id)
                end_time = time.perf_counter()

                latency_ms = (end_time - start_time) * 1000
                latencies.append(latency_ms)

            avg_latency = statistics.mean(latencies)
            max_latency = max(latencies)
            p95_latency = statistics.quantiles(latencies, n=20)[18]  # 95th percentile

            all_latencies.extend(latencies)

            print(f"\nMessage Type: {test_message['type']}")
            print(f"Average latency: {avg_latency:.3f} ms")
            print(f"Max latency: {max_latency:.3f} ms")
            print(f"95th percentile: {p95_latency:.3f} ms")

            assert avg_latency < performance_config["max_message_latency_ms"]
            assert p95_latency < performance_config["max_message_latency_ms"] * 2

        # Overall statistics
        overall_avg = statistics.mean(all_latencies)
        overall_p95 = statistics.quantiles(all_latencies, n=20)[18]

        print(f"\nOverall Message Latency:")
        print(f"Average latency: {overall_avg:.3f} ms")
        print(f"95th percentile: {overall_p95:.3f} ms")

    @pytest.mark.asyncio
    async def test_concurrent_message_processing(self, performance_config):
        """Test concurrent message processing performance."""

        message_processor = MessageProcessor()

        async def process_messages_batch(client_id, num_messages):
            """Process a batch of messages for one client."""
            test_message = {"type": "requestState"}
            latencies = []

            for _ in range(num_messages):
                start_time = time.perf_counter()
                await message_processor.process_message(test_message, client_id)
                end_time = time.perf_counter()

                latency_ms = (end_time - start_time) * 1000
                latencies.append(latency_ms)

            return latencies

        # Run concurrent processing
        num_clients = 50
        messages_per_client = 100

        start_time = time.perf_counter()

        tasks = [
            process_messages_batch(f"client_{i}", messages_per_client)
            for i in range(num_clients)
        ]

        results = await asyncio.gather(*tasks)

        end_time = time.perf_counter()
        total_time = end_time - start_time

        # Analyze results
        all_latencies = [latency for client_latencies in results for latency in client_latencies]
        total_messages = len(all_latencies)
        avg_latency = statistics.mean(all_latencies)
        throughput = total_messages / total_time

        print(f"\nConcurrent Message Processing:")
        print(f"Clients: {num_clients}")
        print(f"Total messages: {total_messages}")
        print(f"Total time: {total_time:.2f} seconds")
        print(f"Throughput: {throughput:.0f} messages/second")
        print(f"Average latency: {avg_latency:.3f} ms")

        assert throughput >= performance_config["min_throughput_msgs_per_sec"] / 2  # Reduced due to concurrency overhead
        assert avg_latency < performance_config["max_message_latency_ms"] * 2

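    # Illustrative sketch, not part of the original test: if the client count above is pushed
    # far beyond 50, unbounded fan-out can itself distort the latency numbers. A stdlib
    # asyncio.Semaphore caps how many client batches run at once; `max_in_flight` is a
    # made-up knob for this sketch, not an existing option.
    @staticmethod
    async def _gather_bounded(batches, max_in_flight=20):
        """Await the given coroutines with at most `max_in_flight` running concurrently."""
        semaphore = asyncio.Semaphore(max_in_flight)

        async def _run(coro):
            async with semaphore:
                return await coro

        return await asyncio.gather(*(_run(coro) for coro in batches))
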
    @pytest.mark.asyncio
    async def test_large_message_performance(self):
        """Test performance with large messages."""

        message_processor = MessageProcessor()

        # Create large message (simulating detection results)
        large_payload = {
            "detections": [
                {
                    "class": f"object_{i}",
                    "confidence": 0.9,
                    "bbox": [i * 10, i * 10, (i + 1) * 10, (i + 1) * 10],
                    "metadata": {
                        "feature_vector": [float(j) for j in range(100)],
                        "description": "x" * 500  # Large text field
                    }
                }
                for i in range(50)  # 50 detections
            ],
            "camera_info": {
                "resolution": [1920, 1080],
                "settings": {"brightness": 50, "contrast": 75},
                "history": [{"timestamp": i, "event": f"event_{i}"} for i in range(100)]
            }
        }

        large_message = {
            "type": "imageDetection",
            "payload": large_payload
        }

        client_id = "large_msg_client"

        # Measure message size
        message_size_bytes = len(json.dumps(large_message))
        print(f"\nLarge Message Performance:")
        print(f"Message size: {message_size_bytes / 1024:.1f} KB")

        # Benchmark large message processing
        processing_times = []
        num_iterations = 100

        for _ in range(num_iterations):
            start_time = time.perf_counter()
            await message_processor.process_message(large_message, client_id)
            end_time = time.perf_counter()

            processing_time_ms = (end_time - start_time) * 1000
            processing_times.append(processing_time_ms)

        avg_processing_time = statistics.mean(processing_times)
        max_processing_time = max(processing_times)

        print(f"Average processing time: {avg_processing_time:.2f} ms")
        print(f"Max processing time: {max_processing_time:.2f} ms")

        # Large messages should still be processed reasonably quickly
        assert avg_processing_time < 100  # Less than 100 ms for large messages
        assert max_processing_time < 500  # Less than 500 ms max


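# Illustrative sketch, not part of the original suite: the large-message test above hands an
# already-parsed dict to the processor, so it excludes (de)serialization cost. Over a real
# WebSocket the same payload also crosses json.dumps/json.loads; this helper, using only
# modules already imported here, times that round trip separately.
def measure_json_round_trip_ms(payload, iterations=100):
    """Average time in milliseconds to serialize and re-parse `payload` once."""
    start = time.perf_counter()
    for _ in range(iterations):
        json.loads(json.dumps(payload))
    return (time.perf_counter() - start) * 1000 / iterations

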
class TestConnectionManagerPerformance:
    """Test connection manager performance."""

    def test_connection_creation_performance(self, performance_config, mock_websocket):
        """Test connection creation and management performance."""

        connection_manager = ConnectionManager()

        # Benchmark connection creation
        creation_times = []
        num_connections = 1000

        for i in range(num_connections):
            start_time = time.perf_counter()
            connection_manager._create_connection(mock_websocket, f"client_{i}")
            end_time = time.perf_counter()

            creation_time_ms = (end_time - start_time) * 1000
            creation_times.append(creation_time_ms)

        avg_creation_time = statistics.mean(creation_times)
        max_creation_time = max(creation_times)

        print(f"\nConnection Creation Performance:")
        print(f"Connections created: {num_connections}")
        print(f"Average creation time: {avg_creation_time:.3f} ms")
        print(f"Max creation time: {max_creation_time:.3f} ms")

        # Connection creation should be very fast
        assert avg_creation_time < 1.0   # Less than 1 ms average
        assert max_creation_time < 10.0  # Less than 10 ms max

    @pytest.mark.asyncio
    async def test_broadcast_performance(self, mock_websocket):
        """Test broadcast message performance."""

        connection_manager = ConnectionManager()

        # Create many mock connections
        num_connections = 1000
        mock_websockets = []

        for i in range(num_connections):
            ws = Mock()
            ws.send_json = AsyncMock()
            ws.send_text = AsyncMock()
            mock_websockets.append(ws)

            # Add to connection manager
            connection = connection_manager._create_connection(ws, f"client_{i}")
            connection.is_connected = True
            connection_manager.connections[f"client_{i}"] = connection

        # Test broadcast performance
        test_message = {"type": "broadcast", "data": "test message"}

        broadcast_times = []
        num_broadcasts = 100

        for _ in range(num_broadcasts):
            start_time = time.perf_counter()
            await connection_manager.broadcast(test_message)
            end_time = time.perf_counter()

            broadcast_time_ms = (end_time - start_time) * 1000
            broadcast_times.append(broadcast_time_ms)

        avg_broadcast_time = statistics.mean(broadcast_times)
        max_broadcast_time = max(broadcast_times)

        print(f"\nBroadcast Performance:")
        print(f"Connections: {num_connections}")
        print(f"Broadcasts: {num_broadcasts}")
        print(f"Average broadcast time: {avg_broadcast_time:.2f} ms")
        print(f"Max broadcast time: {max_broadcast_time:.2f} ms")

        # Broadcast should scale reasonably
        assert avg_broadcast_time < 50  # Less than 50 ms for 1000 connections

        # Verify all connections received the message
        for ws in mock_websockets:
            assert ws.send_json.call_count == num_broadcasts

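    # Illustrative sketch of the assumed fan-out, not ConnectionManager's actual code: if
    # broadcast() sends with asyncio.gather like this, each broadcast costs roughly one send
    # per connection, which is why the 50 ms budget above is tied to the connection count.
    @staticmethod
    async def _fan_out(message, websockets):
        """Send `message` to every websocket concurrently."""
        await asyncio.gather(
            *(ws.send_json(message) for ws in websockets),
            return_exceptions=True,  # a single failed send should not abort the broadcast
        )
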
    def test_subscription_management_performance(self):
        """Test subscription management performance."""

        connection_manager = ConnectionManager()

        # Test subscription operations performance
        num_operations = 10000

        # Add subscriptions
        add_times = []
        for i in range(num_operations):
            client_id = f"client_{i % 100}"       # 100 unique clients
            subscription_id = f"camera_{i % 50}"  # 50 unique cameras

            start_time = time.perf_counter()
            connection_manager.add_subscription(client_id, subscription_id)
            end_time = time.perf_counter()

            add_time_ms = (end_time - start_time) * 1000
            add_times.append(add_time_ms)

        # Query subscriptions
        query_times = []
        for i in range(1000):
            client_id = f"client_{i % 100}"

            start_time = time.perf_counter()
            subscriptions = connection_manager.get_client_subscriptions(client_id)
            end_time = time.perf_counter()

            query_time_ms = (end_time - start_time) * 1000
            query_times.append(query_time_ms)

        # Remove subscriptions
        remove_times = []
        for i in range(num_operations):
            client_id = f"client_{i % 100}"
            subscription_id = f"camera_{i % 50}"

            start_time = time.perf_counter()
            connection_manager.remove_subscription(client_id, subscription_id)
            end_time = time.perf_counter()

            remove_time_ms = (end_time - start_time) * 1000
            remove_times.append(remove_time_ms)

        # Analyze results
        avg_add_time = statistics.mean(add_times)
        avg_query_time = statistics.mean(query_times)
        avg_remove_time = statistics.mean(remove_times)

        print(f"\nSubscription Management Performance:")
        print(f"Average add time: {avg_add_time:.4f} ms")
        print(f"Average query time: {avg_query_time:.4f} ms")
        print(f"Average remove time: {avg_remove_time:.4f} ms")

        # These should be very fast operations
        assert avg_add_time < 0.1
        assert avg_query_time < 0.1
        assert avg_remove_time < 0.1


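# Illustrative sketch, not necessarily how ConnectionManager stores subscriptions: the
# sub-0.1 ms targets above are what a plain set-based index like this delivers, since add,
# lookup, and remove are all average O(1) per client.
class _SubscriptionIndexSketch:
    """Minimal model of a client_id -> set-of-subscription-ids index."""

    def __init__(self):
        self._by_client = {}

    def add(self, client_id, subscription_id):
        self._by_client.setdefault(client_id, set()).add(subscription_id)

    def get(self, client_id):
        return self._by_client.get(client_id, set())

    def remove(self, client_id, subscription_id):
        self._by_client.get(client_id, set()).discard(subscription_id)

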
class TestWebSocketHandlerPerformance:
    """Test complete WebSocket handler performance."""

    @pytest.mark.asyncio
    async def test_concurrent_connections_performance(self, performance_config):
        """Test performance with many concurrent connections."""

        message_processor = MessageProcessor()
        websocket_handler = WebSocketHandler(message_processor)

        async def simulate_client_session(client_id, num_messages=50):
            """Simulate a client WebSocket session."""
            mock_ws = Mock()
            mock_ws.accept = AsyncMock()
            mock_ws.send_json = AsyncMock()
            mock_ws.receive_json = AsyncMock()

            # Simulate a message sequence that ends with a disconnect
            messages = [{"type": "requestState"} for _ in range(num_messages)]
            messages.append(asyncio.CancelledError())  # Raised by receive_json to disconnect

            mock_ws.receive_json.side_effect = messages

            try:
                await websocket_handler.handle_websocket(mock_ws, client_id)
            except asyncio.CancelledError:
                pass  # Expected disconnect

            return len(messages) - 1  # Exclude the disconnect

        # Test concurrent connections
        num_concurrent_clients = 100
        messages_per_client = 25

        start_time = time.perf_counter()

        tasks = [
            simulate_client_session(f"perf_client_{i}", messages_per_client)
            for i in range(num_concurrent_clients)
        ]

        results = await asyncio.gather(*tasks, return_exceptions=True)

        end_time = time.perf_counter()
        total_time = end_time - start_time

        # Analyze results
        successful_clients = len([r for r in results if not isinstance(r, Exception)])
        total_messages = sum(r for r in results if isinstance(r, int))

        print(f"\nConcurrent Connections Performance:")
        print(f"Concurrent clients: {num_concurrent_clients}")
        print(f"Successful clients: {successful_clients}")
        print(f"Total messages: {total_messages}")
        print(f"Total time: {total_time:.2f} seconds")
        print(f"Messages per second: {total_messages / total_time:.0f}")

        assert successful_clients >= num_concurrent_clients * 0.95  # 95% success rate
        assert total_messages / total_time >= 1000  # At least 1000 msg/sec throughput

    @pytest.mark.asyncio
    async def test_memory_usage_under_load(self, performance_config):
        """Test memory usage under high connection load."""

        message_processor = MessageProcessor()
        websocket_handler = WebSocketHandler(message_processor)

        # Measure initial memory
        initial_memory = psutil.Process().memory_info().rss / 1024 / 1024  # MB

        # Create many connections
        num_connections = 500
        connections = []

        for i in range(num_connections):
            mock_ws = Mock()
            mock_ws.accept = AsyncMock()
            mock_ws.send_json = AsyncMock()

            connection = websocket_handler.connection_manager._create_connection(
                mock_ws, f"mem_test_client_{i}"
            )
            connection.is_connected = True
            websocket_handler.connection_manager.connections[f"mem_test_client_{i}"] = connection
            connections.append(connection)

        # Measure memory after connections
        after_connections_memory = psutil.Process().memory_info().rss / 1024 / 1024
        memory_per_connection = (after_connections_memory - initial_memory) / num_connections * 1024  # KB

        # Simulate some activity
        test_message = {"type": "broadcast", "data": "test"}
        for _ in range(10):
            await websocket_handler.connection_manager.broadcast(test_message)

        # Measure memory after activity
        after_activity_memory = psutil.Process().memory_info().rss / 1024 / 1024

        print(f"\nMemory Usage Under Load:")
        print(f"Initial memory: {initial_memory:.1f} MB")
        print(f"After {num_connections} connections: {after_connections_memory:.1f} MB")
        print(f"After activity: {after_activity_memory:.1f} MB")
        print(f"Memory per connection: {memory_per_connection:.1f} KB")

        # Memory usage should be reasonable
        assert memory_per_connection < performance_config["max_memory_per_connection_kb"]

        # Clean up
        websocket_handler.connection_manager.connections.clear()

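    # Illustrative sketch, not part of the original test: RSS as read via psutil above includes
    # allocator and interpreter overhead, so the per-connection figure is an upper bound. The
    # stdlib tracemalloc module attributes Python-level allocations more precisely; a variant
    # of the measurement could wrap connection creation like this.
    @staticmethod
    def _python_alloc_growth_kb(build_connections):
        """Run `build_connections()` and return Python-level allocation growth in KB."""
        import tracemalloc  # stdlib; not imported at module level in this file

        tracemalloc.start()
        before = tracemalloc.take_snapshot()
        build_connections()
        after = tracemalloc.take_snapshot()
        tracemalloc.stop()

        growth_bytes = sum(stat.size_diff for stat in after.compare_to(before, "lineno"))
        return growth_bytes / 1024
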
    @pytest.mark.asyncio
    async def test_heartbeat_performance(self):
        """Test heartbeat mechanism performance."""

        message_processor = MessageProcessor()
        websocket_handler = WebSocketHandler(message_processor, {"heartbeat_interval": 0.1})

        # Create connections with mock WebSockets
        num_connections = 100
        mock_websockets = []

        for i in range(num_connections):
            mock_ws = Mock()
            mock_ws.ping = AsyncMock()
            mock_websockets.append(mock_ws)

            connection = websocket_handler.connection_manager._create_connection(
                mock_ws, f"heartbeat_client_{i}"
            )
            connection.is_connected = True
            websocket_handler.connection_manager.connections[f"heartbeat_client_{i}"] = connection

        # Start heartbeat task
        heartbeat_task = asyncio.create_task(websocket_handler._heartbeat_loop())

        # Let it run for several heartbeat cycles
        start_time = time.perf_counter()
        await asyncio.sleep(0.5)  # 5 heartbeat cycles
        end_time = time.perf_counter()

        # Cancel heartbeat
        heartbeat_task.cancel()

        try:
            await heartbeat_task
        except asyncio.CancelledError:
            pass

        # Analyze heartbeat performance
        elapsed_time = end_time - start_time
        expected_pings = int(elapsed_time / 0.1) * num_connections

        actual_pings = sum(ws.ping.call_count for ws in mock_websockets)
        ping_efficiency = actual_pings / expected_pings if expected_pings > 0 else 0

        print(f"\nHeartbeat Performance:")
        print(f"Connections: {num_connections}")
        print(f"Elapsed time: {elapsed_time:.2f} seconds")
        print(f"Expected pings: {expected_pings}")
        print(f"Actual pings: {actual_pings}")
        print(f"Ping efficiency: {ping_efficiency:.2%}")

        # Should achieve reasonable ping efficiency
        assert ping_efficiency > 0.8  # At least 80% efficiency

        # Clean up
        websocket_handler.connection_manager.connections.clear()

    @pytest.mark.asyncio
    async def test_error_handling_performance(self):
        """Test performance impact of error handling."""

        message_processor = MessageProcessor()

        # Create messages that will cause errors
        error_messages = [
            {"invalid": "message"},    # Missing type
            {"type": "unknown_type"},  # Unknown type
            {"type": "subscribe"},     # Missing payload
        ]

        valid_message = {"type": "requestState"}

        # Mix error messages with valid ones
        test_sequence = (error_messages + [valid_message]) * 250  # 1000 total messages

        start_time = time.perf_counter()

        for message in test_sequence:
            await message_processor.process_message(message, "error_perf_client")

        end_time = time.perf_counter()
        total_time = end_time - start_time
        throughput = len(test_sequence) / total_time

        print(f"\nError Handling Performance:")
        print(f"Total messages (with errors): {len(test_sequence)}")
        print(f"Total time: {total_time:.2f} seconds")
        print(f"Throughput: {throughput:.0f} messages/second")

        # Error handling shouldn't significantly impact performance
        assert throughput > 500  # Should still process > 500 msg/sec with errors