"""
Performance tests for WebSocket communication and message processing.
These tests benchmark WebSocket throughput, latency, and concurrent
connection handling to ensure scalability requirements are met.
"""
import pytest
import asyncio
import time
import statistics
import json
from unittest.mock import Mock, AsyncMock

import psutil

from detector_worker.communication.websocket_handler import (
    WebSocketHandler,
    ConnectionManager,
)
from detector_worker.communication.message_processor import MessageProcessor


@pytest.fixture
def performance_config():
    """Configuration for performance tests."""
    return {
        "max_message_latency_ms": 10,
        "min_throughput_msgs_per_sec": 1000,
        "max_concurrent_connections": 100,
        "max_memory_per_connection_kb": 100,
    }


@pytest.fixture
def mock_websocket():
    """Create mock WebSocket for performance testing."""
    websocket = Mock()
    websocket.accept = AsyncMock()
    websocket.send_json = AsyncMock()
    websocket.send_text = AsyncMock()
    websocket.receive_json = AsyncMock()
    websocket.receive_text = AsyncMock()
    websocket.close = AsyncMock()
    websocket.ping = AsyncMock()
    return websocket
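

# Several tests below build large batches of mock sockets inline; a small
# factory like this (a convenience sketch, not part of the original suite)
# keeps that setup in one place.
def make_mock_websocket():
    """Build a Mock exposing the async WebSocket surface these tests use."""
    ws = Mock()
    for method in ("accept", "send_json", "send_text", "receive_json",
                   "receive_text", "close", "ping"):
        setattr(ws, method, AsyncMock())
    return ws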


class TestWebSocketMessagePerformance:
    """Test WebSocket message processing performance."""

    @pytest.mark.asyncio
    async def test_message_processing_throughput(self, performance_config):
        """Test message processing throughput."""
        message_processor = MessageProcessor()

        # Simple state request message
        test_message = {"type": "requestState"}
        client_id = "perf_client"

        # Warm up
        for _ in range(10):
            await message_processor.process_message(test_message, client_id)

        # Benchmark throughput
        num_messages = 10000
        start_time = time.perf_counter()
        for _ in range(num_messages):
            await message_processor.process_message(test_message, client_id)
        end_time = time.perf_counter()

        total_time = end_time - start_time
        throughput = num_messages / total_time

        print("\nMessage Processing Throughput:")
        print(f"Messages processed: {num_messages}")
        print(f"Total time: {total_time:.2f} seconds")
        print(f"Throughput: {throughput:.0f} messages/second")

        assert throughput >= performance_config["min_throughput_msgs_per_sec"]
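
    # The same measurement could be delegated to pytest-benchmark when that
    # plugin is installed (an optional alternative, not used by this suite).
    # A hedged sketch; asyncio.run() pays a new-event-loop cost per call, so
    # absolute numbers would differ from the loop above:
    #
    #     def test_throughput_benchmarked(benchmark):
    #         processor = MessageProcessor()
    #         benchmark(lambda: asyncio.run(
    #             processor.process_message({"type": "requestState"}, "bench")))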

    @pytest.mark.asyncio
    async def test_message_processing_latency(self, performance_config):
        """Test individual message processing latency."""
        message_processor = MessageProcessor()

        test_messages = [
            {"type": "requestState"},
            {"type": "setSessionId", "payload": {"sessionId": "test", "displayId": "display"}},
            {"type": "patchSession", "payload": {"sessionId": "test", "data": {"test": "value"}}},
        ]
        client_id = "latency_client"

        # Benchmark individual message latency
        all_latencies = []
        for test_message in test_messages:
            latencies = []
            for _ in range(1000):
                start_time = time.perf_counter()
                await message_processor.process_message(test_message, client_id)
                end_time = time.perf_counter()
                latency_ms = (end_time - start_time) * 1000
                latencies.append(latency_ms)

            avg_latency = statistics.mean(latencies)
            max_latency = max(latencies)
            p95_latency = statistics.quantiles(latencies, n=20)[18]  # 95th percentile
            all_latencies.extend(latencies)

            print(f"\nMessage Type: {test_message['type']}")
            print(f"Average latency: {avg_latency:.3f} ms")
            print(f"Max latency: {max_latency:.3f} ms")
            print(f"95th percentile: {p95_latency:.3f} ms")

            assert avg_latency < performance_config["max_message_latency_ms"]
            assert p95_latency < performance_config["max_message_latency_ms"] * 2

        # Overall statistics
        overall_avg = statistics.mean(all_latencies)
        overall_p95 = statistics.quantiles(all_latencies, n=20)[18]
        print("\nOverall Message Latency:")
        print(f"Average latency: {overall_avg:.3f} ms")
        print(f"95th percentile: {overall_p95:.3f} ms")

    @pytest.mark.asyncio
    async def test_concurrent_message_processing(self, performance_config):
        """Test concurrent message processing performance."""
        message_processor = MessageProcessor()

        async def process_messages_batch(client_id, num_messages):
            """Process a batch of messages for one client."""
            test_message = {"type": "requestState"}
            latencies = []
            for _ in range(num_messages):
                start_time = time.perf_counter()
                await message_processor.process_message(test_message, client_id)
                end_time = time.perf_counter()
                latency_ms = (end_time - start_time) * 1000
                latencies.append(latency_ms)
            return latencies

        # Run concurrent processing
        num_clients = 50
        messages_per_client = 100

        start_time = time.perf_counter()
        tasks = [
            process_messages_batch(f"client_{i}", messages_per_client)
            for i in range(num_clients)
        ]
        results = await asyncio.gather(*tasks)
        end_time = time.perf_counter()

        total_time = end_time - start_time

        # Analyze results
        all_latencies = [latency for client_latencies in results for latency in client_latencies]
        total_messages = len(all_latencies)
        avg_latency = statistics.mean(all_latencies)
        throughput = total_messages / total_time

        print("\nConcurrent Message Processing:")
        print(f"Clients: {num_clients}")
        print(f"Total messages: {total_messages}")
        print(f"Total time: {total_time:.2f} seconds")
        print(f"Throughput: {throughput:.0f} messages/second")
        print(f"Average latency: {avg_latency:.3f} ms")

        # Reduced targets account for concurrency overhead
        assert throughput >= performance_config["min_throughput_msgs_per_sec"] / 2
        assert avg_latency < performance_config["max_message_latency_ms"] * 2
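
    # Note: these client coroutines share a single event loop, so this test
    # measures cooperative interleaving overhead rather than true parallelism;
    # the halved throughput target above reflects that scheduling cost.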

    @pytest.mark.asyncio
    async def test_large_message_performance(self):
        """Test performance with large messages."""
        message_processor = MessageProcessor()

        # Create a large message (simulating detection results)
        large_payload = {
            "detections": [
                {
                    "class": f"object_{i}",
                    "confidence": 0.9,
                    "bbox": [i * 10, i * 10, (i + 1) * 10, (i + 1) * 10],
                    "metadata": {
                        "feature_vector": [float(j) for j in range(100)],
                        "description": "x" * 500  # Large text field
                    }
                }
                for i in range(50)  # 50 detections
            ],
            "camera_info": {
                "resolution": [1920, 1080],
                "settings": {"brightness": 50, "contrast": 75},
                "history": [{"timestamp": i, "event": f"event_{i}"} for i in range(100)]
            }
        }
        large_message = {
            "type": "imageDetection",
            "payload": large_payload
        }
        client_id = "large_msg_client"

        # Measure the serialized size in bytes (encode first; len() of a str
        # counts characters, not bytes)
        message_size_bytes = len(json.dumps(large_message).encode("utf-8"))
        print("\nLarge Message Performance:")
        print(f"Message size: {message_size_bytes / 1024:.1f} KB")

        # Benchmark large message processing
        processing_times = []
        num_iterations = 100
        for _ in range(num_iterations):
            start_time = time.perf_counter()
            await message_processor.process_message(large_message, client_id)
            end_time = time.perf_counter()
            processing_time_ms = (end_time - start_time) * 1000
            processing_times.append(processing_time_ms)

        avg_processing_time = statistics.mean(processing_times)
        max_processing_time = max(processing_times)

        print(f"Average processing time: {avg_processing_time:.2f} ms")
        print(f"Max processing time: {max_processing_time:.2f} ms")

        # Large messages should still be processed reasonably quickly
        assert avg_processing_time < 100  # Less than 100 ms average
        assert max_processing_time < 500  # Less than 500 ms max
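
    # If the large-message budget is ever missed, separating JSON
    # serialization cost from handler logic is a useful first check; a quick,
    # illustrative split:
    #     t0 = time.perf_counter()
    #     json.dumps(large_message)
    #     print(f"dumps alone: {(time.perf_counter() - t0) * 1000:.2f} ms")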


class TestConnectionManagerPerformance:
    """Test connection manager performance."""

    def test_connection_creation_performance(self, mock_websocket):
        """Test connection creation and management performance."""
        connection_manager = ConnectionManager()

        # Benchmark connection creation
        creation_times = []
        num_connections = 1000
        for i in range(num_connections):
            start_time = time.perf_counter()
            connection_manager._create_connection(mock_websocket, f"client_{i}")
            end_time = time.perf_counter()
            creation_time_ms = (end_time - start_time) * 1000
            creation_times.append(creation_time_ms)

        avg_creation_time = statistics.mean(creation_times)
        max_creation_time = max(creation_times)

        print("\nConnection Creation Performance:")
        print(f"Connections created: {num_connections}")
        print(f"Average creation time: {avg_creation_time:.3f} ms")
        print(f"Max creation time: {max_creation_time:.3f} ms")

        # Connection creation should be very fast
        assert avg_creation_time < 1.0  # Less than 1 ms average
        assert max_creation_time < 10.0  # Less than 10 ms max

    @pytest.mark.asyncio
    async def test_broadcast_performance(self):
        """Test broadcast message performance."""
        connection_manager = ConnectionManager()

        # Create many mock connections
        num_connections = 1000
        mock_websockets = []
        for i in range(num_connections):
            ws = Mock()
            ws.send_json = AsyncMock()
            ws.send_text = AsyncMock()
            mock_websockets.append(ws)

            # Add to connection manager
            connection = connection_manager._create_connection(ws, f"client_{i}")
            connection.is_connected = True
            connection_manager.connections[f"client_{i}"] = connection

        # Test broadcast performance
        test_message = {"type": "broadcast", "data": "test message"}
        broadcast_times = []
        num_broadcasts = 100
        for _ in range(num_broadcasts):
            start_time = time.perf_counter()
            await connection_manager.broadcast(test_message)
            end_time = time.perf_counter()
            broadcast_time_ms = (end_time - start_time) * 1000
            broadcast_times.append(broadcast_time_ms)

        avg_broadcast_time = statistics.mean(broadcast_times)
        max_broadcast_time = max(broadcast_times)

        print("\nBroadcast Performance:")
        print(f"Connections: {num_connections}")
        print(f"Broadcasts: {num_broadcasts}")
        print(f"Average broadcast time: {avg_broadcast_time:.2f} ms")
        print(f"Max broadcast time: {max_broadcast_time:.2f} ms")

        # Broadcast should scale reasonably
        assert avg_broadcast_time < 50  # Less than 50 ms for 1000 connections

        # Verify every connection received every broadcast
        for ws in mock_websockets:
            assert ws.send_json.call_count == num_broadcasts
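
    # For scale reference: a broadcast that awaits sends concurrently rather
    # than one socket at a time behaves far better at this connection count.
    # A minimal sketch, assuming each connection exposes .websocket and
    # .is_connected the way these tests wire them up (illustrative, not the
    # real ConnectionManager implementation):
    #
    #     async def broadcast_concurrently(connections, message):
    #         await asyncio.gather(
    #             *(conn.websocket.send_json(message)
    #               for conn in connections.values() if conn.is_connected),
    #             return_exceptions=True,  # a dead socket must not stall the rest
    #         )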

    def test_subscription_management_performance(self):
        """Test subscription management performance."""
        connection_manager = ConnectionManager()

        # Test subscription operations performance
        num_operations = 10000

        # Add subscriptions
        add_times = []
        for i in range(num_operations):
            client_id = f"client_{i % 100}"  # 100 unique clients
            subscription_id = f"camera_{i % 50}"  # 50 unique cameras
            start_time = time.perf_counter()
            connection_manager.add_subscription(client_id, subscription_id)
            end_time = time.perf_counter()
            add_time_ms = (end_time - start_time) * 1000
            add_times.append(add_time_ms)

        # Query subscriptions
        query_times = []
        for i in range(1000):
            client_id = f"client_{i % 100}"
            start_time = time.perf_counter()
            connection_manager.get_client_subscriptions(client_id)
            end_time = time.perf_counter()
            query_time_ms = (end_time - start_time) * 1000
            query_times.append(query_time_ms)

        # Remove subscriptions
        remove_times = []
        for i in range(num_operations):
            client_id = f"client_{i % 100}"
            subscription_id = f"camera_{i % 50}"
            start_time = time.perf_counter()
            connection_manager.remove_subscription(client_id, subscription_id)
            end_time = time.perf_counter()
            remove_time_ms = (end_time - start_time) * 1000
            remove_times.append(remove_time_ms)

        # Analyze results
        avg_add_time = statistics.mean(add_times)
        avg_query_time = statistics.mean(query_times)
        avg_remove_time = statistics.mean(remove_times)

        print("\nSubscription Management Performance:")
        print(f"Average add time: {avg_add_time:.4f} ms")
        print(f"Average query time: {avg_query_time:.4f} ms")
        print(f"Average remove time: {avg_remove_time:.4f} ms")

        # Should be very fast operations
        assert avg_add_time < 0.1
        assert avg_query_time < 0.1
        assert avg_remove_time < 0.1
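

# The sub-0.1 ms targets above presume set-backed subscription maps. A minimal
# sketch of the data layout those numbers imply (hypothetical; not the real
# ConnectionManager internals):
class SubscriptionIndexSketch:
    def __init__(self):
        self._by_client = {}  # client_id -> set of subscription_ids

    def add(self, client_id, subscription_id):
        self._by_client.setdefault(client_id, set()).add(subscription_id)

    def remove(self, client_id, subscription_id):
        # discard() keeps removal O(1) and tolerant of missing entries
        self._by_client.get(client_id, set()).discard(subscription_id)

    def for_client(self, client_id):
        return self._by_client.get(client_id, set())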


class TestWebSocketHandlerPerformance:
    """Test complete WebSocket handler performance."""

    @pytest.mark.asyncio
    async def test_concurrent_connections_performance(self, performance_config):
        """Test performance with many concurrent connections."""
        message_processor = MessageProcessor()
        websocket_handler = WebSocketHandler(message_processor)

        async def simulate_client_session(client_id, num_messages=50):
            """Simulate a client WebSocket session."""
            mock_ws = Mock()
            mock_ws.accept = AsyncMock()
            mock_ws.send_json = AsyncMock()
            mock_ws.receive_json = AsyncMock()

            # Simulate a message sequence ending in a disconnect; AsyncMock
            # raises an exception instance placed in side_effect when reached.
            messages = [{"type": "requestState"} for _ in range(num_messages)]
            messages.append(asyncio.CancelledError())  # Disconnect
            mock_ws.receive_json.side_effect = messages

            try:
                await websocket_handler.handle_websocket(mock_ws, client_id)
            except asyncio.CancelledError:
                pass  # Expected disconnect

            return len(messages) - 1  # Exclude the disconnect marker

        # Test concurrent connections
        num_concurrent_clients = 100
        messages_per_client = 25

        start_time = time.perf_counter()
        tasks = [
            simulate_client_session(f"perf_client_{i}", messages_per_client)
            for i in range(num_concurrent_clients)
        ]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        end_time = time.perf_counter()

        total_time = end_time - start_time

        # Analyze results
        successful_clients = len([r for r in results if not isinstance(r, Exception)])
        total_messages = sum(r for r in results if isinstance(r, int))

        print("\nConcurrent Connections Performance:")
        print(f"Concurrent clients: {num_concurrent_clients}")
        print(f"Successful clients: {successful_clients}")
        print(f"Total messages: {total_messages}")
        print(f"Total time: {total_time:.2f} seconds")
        print(f"Messages per second: {total_messages / total_time:.0f}")

        assert successful_clients >= num_concurrent_clients * 0.95  # 95% success rate
        # At least the configured throughput floor (1000 msg/sec)
        assert total_messages / total_time >= performance_config["min_throughput_msgs_per_sec"]

    @pytest.mark.asyncio
    async def test_memory_usage_under_load(self, performance_config):
        """Test memory usage under high connection load."""
        message_processor = MessageProcessor()
        websocket_handler = WebSocketHandler(message_processor)

        # Measure initial memory
        initial_memory = psutil.Process().memory_info().rss / 1024 / 1024  # MB

        # Create many connections
        num_connections = 500
        connections = []
        for i in range(num_connections):
            mock_ws = Mock()
            mock_ws.accept = AsyncMock()
            mock_ws.send_json = AsyncMock()

            connection = websocket_handler.connection_manager._create_connection(
                mock_ws, f"mem_test_client_{i}"
            )
            connection.is_connected = True
            websocket_handler.connection_manager.connections[f"mem_test_client_{i}"] = connection
            connections.append(connection)

        # Measure memory after connections
        after_connections_memory = psutil.Process().memory_info().rss / 1024 / 1024
        memory_per_connection = (after_connections_memory - initial_memory) / num_connections * 1024  # KB

        # Simulate some activity
        test_message = {"type": "broadcast", "data": "test"}
        for _ in range(10):
            await websocket_handler.connection_manager.broadcast(test_message)

        # Measure memory after activity
        after_activity_memory = psutil.Process().memory_info().rss / 1024 / 1024

        print("\nMemory Usage Under Load:")
        print(f"Initial memory: {initial_memory:.1f} MB")
        print(f"After {num_connections} connections: {after_connections_memory:.1f} MB")
        print(f"After activity: {after_activity_memory:.1f} MB")
        print(f"Memory per connection: {memory_per_connection:.1f} KB")

        # Memory usage should be reasonable
        assert memory_per_connection < performance_config["max_memory_per_connection_kb"]

        # Clean up
        websocket_handler.connection_manager.connections.clear()
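
    # RSS is a coarse signal (allocator caching and shared pages inflate it).
    # If the per-connection assertion proves flaky, tracemalloc isolates pure
    # Python allocations; a hedged sketch of the alternative measurement:
    #
    #     import tracemalloc
    #     tracemalloc.start()
    #     ...  # create the connections as above
    #     current, peak = tracemalloc.get_traced_memory()
    #     tracemalloc.stop()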

    @pytest.mark.asyncio
    async def test_heartbeat_performance(self):
        """Test heartbeat mechanism performance."""
        message_processor = MessageProcessor()
        websocket_handler = WebSocketHandler(message_processor, {"heartbeat_interval": 0.1})

        # Create connections with mock WebSockets
        num_connections = 100
        mock_websockets = []
        for i in range(num_connections):
            mock_ws = Mock()
            mock_ws.ping = AsyncMock()
            mock_websockets.append(mock_ws)

            connection = websocket_handler.connection_manager._create_connection(
                mock_ws, f"heartbeat_client_{i}"
            )
            connection.is_connected = True
            websocket_handler.connection_manager.connections[f"heartbeat_client_{i}"] = connection

        # Start heartbeat task
        heartbeat_task = asyncio.create_task(websocket_handler._heartbeat_loop())

        # Let it run for several heartbeat cycles
        start_time = time.perf_counter()
        await asyncio.sleep(0.5)  # ~5 heartbeat cycles
        end_time = time.perf_counter()

        # Cancel heartbeat
        heartbeat_task.cancel()
        try:
            await heartbeat_task
        except asyncio.CancelledError:
            pass

        # Analyze heartbeat performance
        elapsed_time = end_time - start_time
        expected_pings = int(elapsed_time / 0.1) * num_connections
        actual_pings = sum(ws.ping.call_count for ws in mock_websockets)
        ping_efficiency = actual_pings / expected_pings if expected_pings > 0 else 0

        print("\nHeartbeat Performance:")
        print(f"Connections: {num_connections}")
        print(f"Elapsed time: {elapsed_time:.2f} seconds")
        print(f"Expected pings: {expected_pings}")
        print(f"Actual pings: {actual_pings}")
        print(f"Ping efficiency: {ping_efficiency:.2%}")

        # Should achieve reasonable ping efficiency
        assert ping_efficiency > 0.8  # At least 80% efficiency

        # Clean up
        websocket_handler.connection_manager.connections.clear()
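
    # The shape of the loop this test assumes, as a hedged sketch (the real
    # implementation is WebSocketHandler._heartbeat_loop; the .websocket and
    # .is_connected attributes are assumptions carried over from the mocks
    # above):
    #
    #     async def _heartbeat_loop(self):
    #         while True:
    #             await asyncio.gather(
    #                 *(conn.websocket.ping()
    #                   for conn in self.connection_manager.connections.values()
    #                   if conn.is_connected),
    #                 return_exceptions=True,
    #             )
    #             await asyncio.sleep(self.heartbeat_interval)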

    @pytest.mark.asyncio
    async def test_error_handling_performance(self):
        """Test performance impact of error handling."""
        message_processor = MessageProcessor()

        # Create messages that will cause errors
        error_messages = [
            {"invalid": "message"},    # Missing type
            {"type": "unknown_type"},  # Unknown type
            {"type": "subscribe"},     # Missing payload
        ]
        valid_message = {"type": "requestState"}

        # Mix error messages with valid ones
        test_sequence = (error_messages + [valid_message]) * 250  # 1000 total messages

        start_time = time.perf_counter()
        for message in test_sequence:
            await message_processor.process_message(message, "error_perf_client")
        end_time = time.perf_counter()

        total_time = end_time - start_time
        throughput = len(test_sequence) / total_time

        print("\nError Handling Performance:")
        print(f"Total messages (with errors): {len(test_sequence)}")
        print(f"Total time: {total_time:.2f} seconds")
        print(f"Throughput: {throughput:.0f} messages/second")

        # Error handling shouldn't significantly impact performance
        assert throughput > 500  # Should still process > 500 msg/sec even with errors
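

# To reproduce the printed metrics locally, run this module with pytest's
# output capture disabled, e.g.:
#   pytest tests/performance/test_websocket_performance.py -v -s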