# python-detector-worker/tests/integration/test_pipeline_integration.py
"""
Integration tests for pipeline execution workflows.
Tests the complete machine learning pipeline execution including
detection, classification, database updates, and Redis actions.
"""
import pytest
import asyncio
import json
import tempfile
import uuid
import time
from pathlib import Path
from unittest.mock import Mock, patch, AsyncMock
import numpy as np
from detector_worker.pipeline.pipeline_executor import PipelineExecutor
from detector_worker.pipeline.action_executor import ActionExecutor
from detector_worker.pipeline.field_mapper import FieldMapper
from detector_worker.models.model_manager import ModelManager
from detector_worker.storage.database_manager import DatabaseManager
from detector_worker.storage.redis_client import RedisClient, RedisConfig
from detector_worker.detection.detection_result import DetectionResult, BoundingBox


@pytest.fixture
def sample_detection_pipeline():
"""Create sample detection pipeline configuration."""
return {
"modelId": "car_frontal_detection_v1",
"modelFile": "car_frontal_detection_v1.pt",
"multiClass": True,
"expectedClasses": ["Car", "Frontal"],
"triggerClasses": ["Car", "Frontal"],
"minConfidence": 0.8,
"actions": [
{
"type": "redis_save_image",
"region": "Frontal",
"key": "inference:{display_id}:{timestamp}:{session_id}:{filename}",
"expire_seconds": 600
},
{
"type": "postgresql_create_record",
"table": "car_frontal_info",
"fields": {
"display_id": "{display_id}",
"captured_timestamp": "{timestamp}",
"session_id": "{session_id}",
"license_character": None,
"license_type": "No model available"
}
}
],
"branches": [
{
"modelId": "car_brand_cls_v1",
"modelFile": "car_brand_cls_v1.pt",
"parallel": True,
"crop": True,
"cropClass": "Frontal",
"triggerClasses": ["Frontal"],
"minConfidence": 0.85
},
{
"modelId": "car_bodytype_cls_v1",
"modelFile": "car_bodytype_cls_v1.pt",
"parallel": True,
"crop": True,
"cropClass": "Frontal",
"triggerClasses": ["Frontal"],
"minConfidence": 0.80
}
],
"parallelActions": [
{
"type": "postgresql_update_combined",
"table": "car_frontal_info",
"key_field": "session_id",
"waitForBranches": ["car_brand_cls_v1", "car_bodytype_cls_v1"],
"fields": {
"car_brand": "{car_brand_cls_v1.brand}",
"car_body_type": "{car_bodytype_cls_v1.body_type}"
}
}
]
}
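
# Implied execution flow, as exercised by the tests below: the root model runs
# multi-class detection and gates on expectedClasses; the "actions" fire with the
# templated context (Redis image save, initial PostgreSQL insert); the "branches"
# classify the cropped "Frontal" region in parallel; and "parallelActions" wait
# for the named branches before updating the row keyed by session_id.
#
# A minimal sketch of how the "{model_id.field}" placeholders in parallelActions
# presumably resolve against collected branch outputs; this illustrates the
# mapping the tests assert on, not the actual FieldMapper API.
def _resolve_branch_placeholder(template, branch_results):
    """Resolve e.g. "{car_brand_cls_v1.brand}" from collected branch results."""
    model_id, field = template.strip("{}").split(".", 1)
    return branch_results[model_id][field]
# Example:
# _resolve_branch_placeholder("{car_brand_cls_v1.brand}",
#                             {"car_brand_cls_v1": {"brand": "Toyota"}})  # -> "Toyota"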


@pytest.fixture
def sample_frame():
"""Create sample frame for testing."""
return np.ones((480, 640, 3), dtype=np.uint8) * 128


@pytest.fixture
def detection_context():
"""Create sample detection context."""
return {
"camera_id": "camera_001",
"display_id": "display_001",
"timestamp": int(time.time() * 1000),
"session_id": str(uuid.uuid4()),
"frame": np.ones((480, 640, 3), dtype=np.uint8) * 128,
"filename": "detection_image.jpg"
}
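
# The tests below hand-build the same Ultralytics-style result mocks repeatedly.
# These two helpers are an illustrative consolidation of that scaffolding; they
# are sketches only and are not used by the existing tests.
def _mock_detection_result(xyxy, conf, cls, names):
    """Mock a YOLO detection result exposing .boxes tensors and a .names mapping."""
    result = Mock()
    result.names = names
    result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array(xyxy)
    result.boxes.conf.cpu.return_value.numpy.return_value = np.array(conf)
    result.boxes.cls.cpu.return_value.numpy.return_value = np.array(cls)
    return result


def _mock_classification_result(top1, confidence, names):
    """Mock a YOLO classification result exposing .probs.top1/.top1conf and .names."""
    result = Mock()
    result.probs.top1 = top1
    result.probs.top1conf.item.return_value = confidence
    result.names = names
    return result
# Example: _mock_detection_result([[50, 100, 350, 450]], [0.92], [0], {0: "Car"})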


class TestPipelineIntegration:
"""Test complete pipeline integration workflows."""
@pytest.mark.asyncio
async def test_complete_detection_classification_pipeline(self, sample_detection_pipeline, detection_context):
"""Test complete detection to classification pipeline."""
pipeline_executor = PipelineExecutor()
model_manager = ModelManager()
with patch('torch.load') as mock_torch_load, \
patch('os.path.exists', return_value=True), \
patch('psycopg2.connect') as mock_db_connect, \
patch('redis.Redis') as mock_redis:
# Setup detection model mock
mock_detection_model = Mock()
mock_detection_result = Mock()
# Mock successful multi-class detection
mock_detection_result.boxes = Mock()
mock_detection_result.boxes.xyxy = Mock()
mock_detection_result.boxes.conf = Mock()
mock_detection_result.boxes.cls = Mock()
mock_detection_result.names = {0: "Car", 1: "Frontal"}
# Detection results: Car and Frontal detected with high confidence
mock_detection_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
[50, 100, 350, 450], # Car bbox
[150, 200, 300, 400] # Frontal bbox (within Car)
])
mock_detection_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.92, 0.89])
mock_detection_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1])
mock_detection_model.return_value = mock_detection_result
# Setup classification models
mock_brand_model = Mock()
mock_brand_result = Mock()
mock_brand_result.probs = Mock()
mock_brand_result.probs.top1 = 3 # Toyota index
mock_brand_result.probs.top1conf = Mock()
mock_brand_result.probs.top1conf.item.return_value = 0.87
mock_brand_result.names = {3: "Toyota"}
mock_brand_model.return_value = mock_brand_result
mock_bodytype_model = Mock()
mock_bodytype_result = Mock()
mock_bodytype_result.probs = Mock()
mock_bodytype_result.probs.top1 = 1 # Sedan index
mock_bodytype_result.probs.top1conf = Mock()
mock_bodytype_result.probs.top1conf.item.return_value = 0.82
mock_bodytype_result.names = {1: "Sedan"}
mock_bodytype_model.return_value = mock_bodytype_result
# Route model loading to appropriate mocks
def model_loader(path, **kwargs):
if "detection" in path:
return mock_detection_model
elif "brand" in path:
return mock_brand_model
elif "bodytype" in path:
return mock_bodytype_model
return Mock()
mock_torch_load.side_effect = model_loader
# Setup database mock
mock_db_conn = Mock()
mock_db_connect.return_value = mock_db_conn
mock_cursor = Mock()
mock_db_conn.cursor.return_value = mock_cursor
mock_cursor.fetchone.return_value = None
# Setup Redis mock
mock_redis_instance = Mock()
mock_redis.return_value = mock_redis_instance
mock_redis_instance.ping.return_value = True
mock_redis_instance.set.return_value = True
mock_redis_instance.expire.return_value = True
# Mock image encoding for Redis storage
with patch('cv2.imencode') as mock_imencode:
encoded_data = np.array([1, 2, 3, 4], dtype=np.uint8)
mock_imencode.return_value = (True, encoded_data)
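# cv2.imencode returns a (success, buffer) tuple; the redis_save_image action is
# expected to store the encoded buffer under the templated key, which the Redis
# set/expire assertions below depend on.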
# Execute complete pipeline
result = await pipeline_executor.execute_pipeline(sample_detection_pipeline, detection_context)
# Verify pipeline execution
assert result is not None
assert result.get("status") == "completed"
assert "detections" in result
# Verify detection results
detections = result["detections"]
assert len(detections) == 2 # Car and Frontal
detection_classes = [d.get("class") for d in detections]
assert "Car" in detection_classes
assert "Frontal" in detection_classes
# Verify classification results
assert "classification_results" in result
classification_results = result["classification_results"]
assert "car_brand_cls_v1" in classification_results
brand_result = classification_results["car_brand_cls_v1"]
assert brand_result.get("brand") == "Toyota"
assert brand_result.get("confidence") == 0.87
assert "car_bodytype_cls_v1" in classification_results
bodytype_result = classification_results["car_bodytype_cls_v1"]
assert bodytype_result.get("body_type") == "Sedan"
assert bodytype_result.get("confidence") == 0.82
# Verify database operations
db_calls = mock_cursor.execute.call_args_list
# Should have INSERT for initial record creation
insert_calls = [call for call in db_calls if "INSERT" in str(call[0])]
assert len(insert_calls) >= 1
# Should have UPDATE for classification results
update_calls = [call for call in db_calls if "UPDATE" in str(call[0])]
assert len(update_calls) >= 1
# Verify Redis operations
assert mock_redis_instance.set.called
assert mock_redis_instance.expire.called
@pytest.mark.asyncio
async def test_pipeline_with_missing_detections(self, sample_detection_pipeline, detection_context):
"""Test pipeline behavior when expected detections are missing."""
pipeline_executor = PipelineExecutor()
with patch('torch.load') as mock_torch_load, \
patch('os.path.exists', return_value=True):
# Setup detection model that doesn't find expected classes
mock_detection_model = Mock()
mock_detection_result = Mock()
mock_detection_result.boxes = Mock()
mock_detection_result.boxes.xyxy = Mock()
mock_detection_result.boxes.conf = Mock()
mock_detection_result.boxes.cls = Mock()
mock_detection_result.names = {0: "Car", 1: "Frontal"}
# Only detect Car, no Frontal
mock_detection_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
[50, 100, 350, 450] # Only Car bbox
])
mock_detection_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.92])
mock_detection_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0])
mock_detection_model.return_value = mock_detection_result
mock_torch_load.return_value = mock_detection_model
# Execute pipeline
result = await pipeline_executor.execute_pipeline(sample_detection_pipeline, detection_context)
# Pipeline should complete but skip classification branches
assert result is not None
assert "detections" in result
detections = result["detections"]
assert len(detections) == 1 # Only Car detected
assert detections[0].get("class") == "Car"
# Classification should not have run (no Frontal detected)
classification_results = result.get("classification_results", {})
assert len(classification_results) == 0 or all(
not res for res in classification_results.values()
)
@pytest.mark.asyncio
async def test_pipeline_with_low_confidence_detections(self, sample_detection_pipeline, detection_context):
"""Test pipeline with detections below confidence threshold."""
pipeline_executor = PipelineExecutor()
with patch('torch.load') as mock_torch_load, \
patch('os.path.exists', return_value=True):
mock_detection_model = Mock()
mock_detection_result = Mock()
mock_detection_result.boxes = Mock()
mock_detection_result.boxes.xyxy = Mock()
mock_detection_result.boxes.conf = Mock()
mock_detection_result.boxes.cls = Mock()
mock_detection_result.names = {0: "Car", 1: "Frontal"}
# Detections with low confidence (below 0.8 threshold)
mock_detection_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
[50, 100, 350, 450], # Car bbox
[150, 200, 300, 400] # Frontal bbox
])
mock_detection_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.75, 0.70]) # Below threshold
mock_detection_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1])
mock_detection_model.return_value = mock_detection_result
mock_torch_load.return_value = mock_detection_model
# Execute pipeline
result = await pipeline_executor.execute_pipeline(sample_detection_pipeline, detection_context)
# Should complete but with filtered detections
assert result is not None
# None of the sub-threshold detections should surface as valid (>= 0.8 confidence) results
detections = result.get("detections", [])
high_conf_detections = [d for d in detections if d.get("confidence", 0) >= 0.8]
assert len(high_conf_detections) == 0
@pytest.mark.asyncio
async def test_pipeline_branch_execution_order(self, sample_detection_pipeline, detection_context):
"""Test that pipeline branches execute in correct order and parallel mode works."""
pipeline_executor = PipelineExecutor()
with patch('torch.load') as mock_torch_load, \
patch('os.path.exists', return_value=True), \
patch('psycopg2.connect') as mock_db_connect:
# Track execution order
execution_order = []
# Setup detection model
mock_detection_model = Mock()
mock_detection_result = Mock()
mock_detection_result.boxes = Mock()
mock_detection_result.boxes.xyxy = Mock()
mock_detection_result.boxes.conf = Mock()
mock_detection_result.boxes.cls = Mock()
mock_detection_result.names = {0: "Car", 1: "Frontal"}
mock_detection_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
[50, 100, 350, 450], [150, 200, 300, 400]
])
mock_detection_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.92, 0.89])
mock_detection_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1])
def track_detection_execution(*args, **kwargs):
execution_order.append("detection")
return mock_detection_result
mock_detection_model.side_effect = track_detection_execution
# Setup classification models with execution tracking
def create_tracked_model(model_id):
def track_execution(*args, **kwargs):
execution_order.append(model_id)
result = Mock()
result.probs = Mock()
result.probs.top1 = 0
result.probs.top1conf = Mock()
result.probs.top1conf.item.return_value = 0.90
result.names = {0: "TestResult"}
return result
model = Mock()
model.side_effect = track_execution
return model
# Route models with execution tracking
def model_loader(path, **kwargs):
if "detection" in path:
return mock_detection_model
elif "brand" in path:
return create_tracked_model("car_brand_cls_v1")
elif "bodytype" in path:
return create_tracked_model("car_bodytype_cls_v1")
return Mock()
mock_torch_load.side_effect = model_loader
# Setup database mock
mock_db_conn = Mock()
mock_db_connect.return_value = mock_db_conn
mock_cursor = Mock()
mock_db_conn.cursor.return_value = mock_cursor
# Execute pipeline
result = await pipeline_executor.execute_pipeline(sample_detection_pipeline, detection_context)
# Verify execution order
assert "detection" in execution_order
assert execution_order[0] == "detection" # Detection should run first
# Classification models should run after detection
brand_index = execution_order.index("car_brand_cls_v1") if "car_brand_cls_v1" in execution_order else -1
bodytype_index = execution_order.index("car_bodytype_cls_v1") if "car_bodytype_cls_v1" in execution_order else -1
detection_index = execution_order.index("detection")
if brand_index >= 0:
assert brand_index > detection_index
if bodytype_index >= 0:
assert bodytype_index > detection_index
# Since branches are parallel, they could run in any order relative to each other
# but both should run after detection
@pytest.mark.asyncio
async def test_pipeline_error_recovery(self, sample_detection_pipeline, detection_context):
"""Test pipeline error handling and recovery."""
pipeline_executor = PipelineExecutor()
with patch('torch.load') as mock_torch_load, \
patch('os.path.exists', return_value=True), \
patch('psycopg2.connect') as mock_db_connect:
# Setup detection model that works
mock_detection_model = Mock()
mock_detection_result = Mock()
mock_detection_result.boxes = Mock()
mock_detection_result.boxes.xyxy = Mock()
mock_detection_result.boxes.conf = Mock()
mock_detection_result.boxes.cls = Mock()
mock_detection_result.names = {0: "Car", 1: "Frontal"}
mock_detection_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
[50, 100, 350, 450], [150, 200, 300, 400]
])
mock_detection_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.92, 0.89])
mock_detection_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1])
mock_detection_model.return_value = mock_detection_result
# Setup classification models - one fails, one succeeds
mock_brand_model = Mock()
mock_brand_model.side_effect = RuntimeError("Model inference failed")
mock_bodytype_model = Mock()
mock_bodytype_result = Mock()
mock_bodytype_result.probs = Mock()
mock_bodytype_result.probs.top1 = 1
mock_bodytype_result.probs.top1conf = Mock()
mock_bodytype_result.probs.top1conf.item.return_value = 0.85
mock_bodytype_result.names = {1: "SUV"}
mock_bodytype_model.return_value = mock_bodytype_result
def model_loader(path, **kwargs):
if "detection" in path:
return mock_detection_model
elif "brand" in path:
return mock_brand_model
elif "bodytype" in path:
return mock_bodytype_model
return Mock()
mock_torch_load.side_effect = model_loader
# Setup database mock
mock_db_conn = Mock()
mock_db_connect.return_value = mock_db_conn
mock_cursor = Mock()
mock_db_conn.cursor.return_value = mock_cursor
# Execute pipeline
result = await pipeline_executor.execute_pipeline(sample_detection_pipeline, detection_context)
# Pipeline should complete despite one branch failing
assert result is not None
# Detection should succeed
assert "detections" in result
detections = result["detections"]
assert len(detections) == 2
# Classification results should be partial
classification_results = result.get("classification_results", {})
# Brand classification should have failed
brand_result = classification_results.get("car_brand_cls_v1")
assert brand_result is None or brand_result.get("error") is not None
# Body type classification should have succeeded
bodytype_result = classification_results.get("car_bodytype_cls_v1")
assert bodytype_result is not None
assert bodytype_result.get("body_type") == "SUV"
assert bodytype_result.get("confidence") == 0.85
@pytest.mark.asyncio
async def test_field_mapping_and_database_update(self, sample_detection_pipeline, detection_context):
"""Test field mapping and database update integration."""
pipeline_executor = PipelineExecutor()
field_mapper = FieldMapper()
with patch('torch.load') as mock_torch_load, \
patch('os.path.exists', return_value=True), \
patch('psycopg2.connect') as mock_db_connect:
# Setup successful detection and classification
mock_detection_model = Mock()
mock_detection_result = Mock()
mock_detection_result.boxes = Mock()
mock_detection_result.boxes.xyxy = Mock()
mock_detection_result.boxes.conf = Mock()
mock_detection_result.boxes.cls = Mock()
mock_detection_result.names = {0: "Car", 1: "Frontal"}
mock_detection_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
[50, 100, 350, 450], [150, 200, 300, 400]
])
mock_detection_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.92, 0.89])
mock_detection_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1])
mock_detection_model.return_value = mock_detection_result
# Setup classification models
mock_brand_model = Mock()
mock_brand_result = Mock()
mock_brand_result.probs = Mock()
mock_brand_result.probs.top1 = 2
mock_brand_result.probs.top1conf = Mock()
mock_brand_result.probs.top1conf.item.return_value = 0.88
mock_brand_result.names = {2: "Honda"}
mock_brand_model.return_value = mock_brand_result
mock_bodytype_model = Mock()
mock_bodytype_result = Mock()
mock_bodytype_result.probs = Mock()
mock_bodytype_result.probs.top1 = 0
mock_bodytype_result.probs.top1conf = Mock()
mock_bodytype_result.probs.top1conf.item.return_value = 0.91
mock_bodytype_result.names = {0: "Hatchback"}
mock_bodytype_model.return_value = mock_bodytype_result
def model_loader(path, **kwargs):
if "detection" in path:
return mock_detection_model
elif "brand" in path:
return mock_brand_model
elif "bodytype" in path:
return mock_bodytype_model
return Mock()
mock_torch_load.side_effect = model_loader
# Setup database mock
mock_db_conn = Mock()
mock_db_connect.return_value = mock_db_conn
mock_cursor = Mock()
mock_db_conn.cursor.return_value = mock_cursor
# Execute pipeline
result = await pipeline_executor.execute_pipeline(sample_detection_pipeline, detection_context)
# Verify pipeline completed successfully
assert result is not None
assert result.get("status") == "completed"
# Check database operations
db_calls = mock_cursor.execute.call_args_list
# Should have INSERT and UPDATE operations
insert_calls = [call for call in db_calls if "INSERT" in str(call[0])]
update_calls = [call for call in db_calls if "UPDATE" in str(call[0])]
assert len(insert_calls) >= 1
assert len(update_calls) >= 1
# Check that UPDATE includes field mapping results
update_sql = str(update_calls[0][0])
assert "car_brand" in update_sql.lower()
assert "car_body_type" in update_sql.lower()
# Check that classification results were properly mapped
classification_results = result.get("classification_results", {})
assert "car_brand_cls_v1" in classification_results
assert "car_bodytype_cls_v1" in classification_results
brand_result = classification_results["car_brand_cls_v1"]
bodytype_result = classification_results["car_bodytype_cls_v1"]
assert brand_result.get("brand") == "Honda"
assert brand_result.get("confidence") == 0.88
assert bodytype_result.get("body_type") == "Hatchback"
assert bodytype_result.get("confidence") == 0.91
@pytest.mark.asyncio
async def test_redis_image_storage_integration(self, sample_detection_pipeline, detection_context):
"""Test Redis image storage integration in pipeline."""
pipeline_executor = PipelineExecutor()
with patch('torch.load') as mock_torch_load, \
patch('os.path.exists', return_value=True), \
patch('redis.Redis') as mock_redis, \
patch('cv2.imencode') as mock_imencode:
# Setup successful detection
mock_detection_model = Mock()
mock_detection_result = Mock()
mock_detection_result.boxes = Mock()
mock_detection_result.boxes.xyxy = Mock()
mock_detection_result.boxes.conf = Mock()
mock_detection_result.boxes.cls = Mock()
mock_detection_result.names = {0: "Car", 1: "Frontal"}
mock_detection_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
[50, 100, 350, 450], [150, 200, 300, 400]
])
mock_detection_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.92, 0.89])
mock_detection_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1])
mock_detection_model.return_value = mock_detection_result
mock_torch_load.return_value = mock_detection_model
# Setup Redis mock
mock_redis_instance = Mock()
mock_redis.return_value = mock_redis_instance
mock_redis_instance.ping.return_value = True
mock_redis_instance.set.return_value = True
mock_redis_instance.expire.return_value = True
# Setup image encoding mock
encoded_data = np.array([1, 2, 3, 4, 5], dtype=np.uint8)
mock_imencode.return_value = (True, encoded_data)
# Execute pipeline
result = await pipeline_executor.execute_pipeline(sample_detection_pipeline, detection_context)
# Verify Redis operations
assert mock_redis_instance.set.called
assert mock_redis_instance.expire.called
# Check that image was encoded
assert mock_imencode.called
# Verify correct key format was used
set_call = mock_redis_instance.set.call_args
redis_key = set_call[0][0]
# Key should contain display_id, timestamp, session_id
assert detection_context["display_id"] in redis_key
assert detection_context["session_id"] in redis_key
assert str(detection_context["timestamp"]) in redis_key
# Should set expiration
expire_call = mock_redis_instance.expire.call_args
expire_key = expire_call[0][0]
expire_seconds = expire_call[0][1]
assert expire_key == redis_key
assert expire_seconds == 600 # As configured in pipeline
@pytest.mark.asyncio
async def test_pipeline_performance_timing(self, sample_detection_pipeline, detection_context):
"""Test pipeline execution timing and performance."""
pipeline_executor = PipelineExecutor()
with patch('torch.load') as mock_torch_load, \
patch('os.path.exists', return_value=True), \
patch('psycopg2.connect') as mock_db_connect, \
patch('redis.Redis') as mock_redis, \
patch('cv2.imencode') as mock_imencode:
# Setup fast mocks
mock_detection_model = Mock()
mock_detection_result = Mock()
mock_detection_result.boxes = Mock()
mock_detection_result.boxes.xyxy = Mock()
mock_detection_result.boxes.conf = Mock()
mock_detection_result.boxes.cls = Mock()
mock_detection_result.names = {0: "Car", 1: "Frontal"}
mock_detection_result.boxes.xyxy.cpu.return_value.numpy.return_value = np.array([
[50, 100, 350, 450], [150, 200, 300, 400]
])
mock_detection_result.boxes.conf.cpu.return_value.numpy.return_value = np.array([0.92, 0.89])
mock_detection_result.boxes.cls.cpu.return_value.numpy.return_value = np.array([0, 1])
mock_detection_model.return_value = mock_detection_result
# Setup fast classification models
def create_fast_model():
model = Mock()
result = Mock()
result.probs = Mock()
result.probs.top1 = 0
result.probs.top1conf = Mock()
result.probs.top1conf.item.return_value = 0.90
result.names = {0: "TestClass"}
model.return_value = result
return model
def model_loader(path, **kwargs):
if "detection" in path:
return mock_detection_model
else:
return create_fast_model()
mock_torch_load.side_effect = model_loader
# Setup fast database and Redis
mock_db_conn = Mock()
mock_db_connect.return_value = mock_db_conn
mock_cursor = Mock()
mock_db_conn.cursor.return_value = mock_cursor
mock_redis_instance = Mock()
mock_redis.return_value = mock_redis_instance
mock_redis_instance.ping.return_value = True
mock_redis_instance.set.return_value = True
mock_redis_instance.expire.return_value = True
encoded_data = np.array([1, 2, 3], dtype=np.uint8)
mock_imencode.return_value = (True, encoded_data)
# Measure execution time
start_time = time.time()
result = await pipeline_executor.execute_pipeline(sample_detection_pipeline, detection_context)
end_time = time.time()
execution_time = end_time - start_time
# Pipeline should complete quickly (less than 1 second with mocks)
assert execution_time < 1.0
# Should have timing information in result
assert result is not None
if "execution_time" in result:
assert result["execution_time"] > 0
# Verify pipeline completed successfully
assert result.get("status") == "completed"