Refactor: Phase 5 nearly done

ziesorx 2025-09-24 20:29:31 +07:00
parent 227e696ed6
commit 7a9a149955
12 changed files with 2750 additions and 105 deletions

@@ -6,14 +6,15 @@ import logging
import time
import uuid
from typing import Dict, Optional, Any, List, Tuple
import asyncio
from concurrent.futures import ThreadPoolExecutor
import asyncio
import numpy as np
from .tracker import VehicleTracker, TrackedVehicle
from .validator import StableCarValidator, ValidationResult, VehicleState
from .validator import StableCarValidator
from ..models.inference import YOLOWrapper
from ..models.pipeline import PipelineParser
from ..detection.pipeline import DetectionPipeline
logger = logging.getLogger(__name__)
@@ -37,6 +38,9 @@ class TrackingPipelineIntegration:
self.model_manager = model_manager
self.message_sender = message_sender
# Store subscription info for snapshot access
self.subscription_info = None
# Initialize tracking components
tracking_config = pipeline_parser.tracking_config.__dict__ if pipeline_parser.tracking_config else {}
self.tracker = VehicleTracker(tracking_config)
@@ -46,11 +50,15 @@ class TrackingPipelineIntegration:
self.tracking_model: Optional[YOLOWrapper] = None
self.tracking_model_id = None
# Detection pipeline (Phase 5)
self.detection_pipeline: Optional[DetectionPipeline] = None
# Session management
self.active_sessions: Dict[str, str] = {} # display_id -> session_id
self.session_vehicles: Dict[str, int] = {} # session_id -> track_id
self.cleared_sessions: Dict[str, float] = {} # session_id -> clear_time
self.pending_vehicles: Dict[str, int] = {} # display_id -> track_id (waiting for session ID)
self.pending_processing_data: Dict[str, Dict] = {} # display_id -> processing data (waiting for session ID)
# Additional validators for enhanced flow control
self.permanently_processed: Dict[int, float] = {} # track_id -> process_time (never process again)
@@ -69,8 +77,6 @@ class TrackingPipelineIntegration:
'pipelines_executed': 0
}
# Test mode for mock detection
self.test_mode = True
logger.info("TrackingPipelineIntegration initialized")
@@ -109,6 +115,10 @@ class TrackingPipelineIntegration:
if self.tracking_model:
logger.info(f"Tracking model {model_id} loaded successfully")
# Initialize detection pipeline (Phase 5)
await self._initialize_detection_pipeline()
return True
else:
logger.error(f"Failed to load tracking model {model_id}")
@@ -118,6 +128,33 @@ class TrackingPipelineIntegration:
logger.error(f"Error initializing tracking model: {e}", exc_info=True)
return False
async def _initialize_detection_pipeline(self) -> bool:
"""
Initialize the detection pipeline for main detection processing.
Returns:
True if successful, False otherwise
"""
try:
if not self.pipeline_parser:
logger.warning("No pipeline parser available for detection pipeline")
return False
# Create detection pipeline with message sender capability
self.detection_pipeline = DetectionPipeline(self.pipeline_parser, self.model_manager, self.message_sender)
# Initialize detection pipeline
if await self.detection_pipeline.initialize():
logger.info("Detection pipeline initialized successfully")
return True
else:
logger.error("Failed to initialize detection pipeline")
return False
except Exception as e:
logger.error(f"Error initializing detection pipeline: {e}", exc_info=True)
return False
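
For orientation, this is the surface of DetectionPipeline that the integration code above and below relies on. It is a sketch reconstructed from the call sites in this diff, not the real class body from ..detection.pipeline:

# Sketch only: method names and signatures inferred from the calls in this file.
class DetectionPipeline:
    def __init__(self, pipeline_parser, model_manager, message_sender):
        ...
    async def initialize(self) -> bool:
        # Load detection models; True on success.
        ...
    async def execute_detection_phase(self, frame, display_id, subscription_id) -> dict:
        # Run detection and send the imageDetection message to the backend.
        # Result is expected to carry: status, message_sent, processing_time, detected_regions.
        ...
    async def execute_processing_phase(self, frame, display_id, session_id,
                                       subscription_id, detected_regions) -> dict:
        # Run branch processing and database actions once a sessionId exists.
        # Result is expected to carry: status, branch_results, actions_executed, processing_time.
        ...
    def cleanup(self):
        ...
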
async def process_frame(self,
frame: np.ndarray,
display_id: str,
@@ -237,10 +274,7 @@ class TrackingPipelineIntegration:
'confidence': validation_result.confidence
}
# Send mock image detection message in test mode
# Note: Backend will generate and send back session ID via setSessionId
if self.test_mode:
await self._send_mock_detection(subscription_id, None)
# Execute detection pipeline - this will send real imageDetection when detection is found
# Mark vehicle as pending session ID assignment
self.pending_vehicles[display_id] = vehicle.track_id
@@ -283,7 +317,6 @@ class TrackingPipelineIntegration:
subscription_id: str) -> Dict[str, Any]:
"""
Execute the main detection pipeline for a validated vehicle.
This is a placeholder for Phase 5 implementation.
Args:
frame: Input frame
@@ -295,73 +328,146 @@ class TrackingPipelineIntegration:
Returns:
Pipeline execution results
"""
logger.info(f"Executing pipeline for vehicle {vehicle.track_id}, "
logger.info(f"Executing detection pipeline for vehicle {vehicle.track_id}, "
f"session={session_id}, display={display_id}")
# Placeholder for Phase 5 pipeline execution
# This will be implemented when we create the detection module
pipeline_result = {
'status': 'pending',
'message': 'Pipeline execution will be implemented in Phase 5',
'vehicle_id': vehicle.track_id,
'session_id': session_id,
'bbox': vehicle.bbox,
'confidence': vehicle.confidence
}
# Simulate pipeline execution
await asyncio.sleep(0.1)
return pipeline_result
async def _send_mock_detection(self, subscription_id: str, session_id: str):
"""
Send mock image detection message to backend following worker.md specification.
Args:
subscription_id: Full subscription identifier (display-id;camera-id)
session_id: Session identifier for linking detection to user session
"""
try:
# Import here to avoid circular imports
from ..communication.messages import create_image_detection
# Check if detection pipeline is available
if not self.detection_pipeline:
logger.warning("Detection pipeline not initialized, using fallback")
return {
'status': 'error',
'message': 'Detection pipeline not available',
'vehicle_id': vehicle.track_id,
'session_id': session_id
}
# Create flat detection data as required by the model
detection_data = {
"carModel": None,
"carBrand": None,
"carYear": None,
"bodyType": None,
"licensePlateText": None,
"licensePlateConfidence": None
}
# Get model info from tracking configuration in pipeline.json
# Use 52 (from models/52/bangchak_poc2) as modelId
# Use tracking modelId as modelName
tracking_model_id = 52
tracking_model_name = "front_rear_detection_v1" # Default
if self.pipeline_parser and self.pipeline_parser.tracking_config:
tracking_model_name = self.pipeline_parser.tracking_config.model_id
# Create proper Pydantic message using the helper function
detection_message = create_image_detection(
subscription_identifier=subscription_id,
detection_data=detection_data,
model_id=tracking_model_id,
model_name=tracking_model_name
# Execute only the detection phase (first phase)
# This will run detection and send imageDetection message to backend
detection_result = await self.detection_pipeline.execute_detection_phase(
frame=frame,
display_id=display_id,
subscription_id=subscription_id
)
# Send to backend via WebSocket if sender is available
if self.message_sender:
await self.message_sender(detection_message)
logger.info(f"[MOCK DETECTION] Sent to backend: {detection_data}")
else:
logger.info(f"[MOCK DETECTION] No message sender available, would send: {detection_message}")
# Add vehicle information to result
detection_result['vehicle_id'] = vehicle.track_id
detection_result['vehicle_bbox'] = vehicle.bbox
detection_result['vehicle_confidence'] = vehicle.confidence
detection_result['phase'] = 'detection'
logger.info(f"Detection phase executed for vehicle {vehicle.track_id}: "
f"status={detection_result.get('status', 'unknown')}, "
f"message_sent={detection_result.get('message_sent', False)}, "
f"processing_time={detection_result.get('processing_time', 0):.3f}s")
# Store frame and detection results for processing phase
if detection_result['message_sent']:
# Store for later processing when sessionId is received
self.pending_processing_data[display_id] = {
'frame': frame.copy(), # Store copy of frame for processing phase
'vehicle': vehicle,
'subscription_id': subscription_id,
'detection_result': detection_result,
'timestamp': time.time()
}
logger.info(f"Stored processing data for {display_id}, waiting for sessionId from backend")
return detection_result
except Exception as e:
logger.error(f"Error sending mock detection: {e}", exc_info=True)
logger.error(f"Error executing detection pipeline: {e}", exc_info=True)
return {
'status': 'error',
'message': str(e),
'vehicle_id': vehicle.track_id,
'session_id': session_id,
'processing_time': 0.0
}
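
Concretely, a successful detection-phase result, as stored in pending_processing_data, looks roughly like this; the field names are taken from the reads and writes above, while the values are purely illustrative:

detection_result = {
    'status': 'success',                    # set by execute_detection_phase
    'message_sent': True,                   # imageDetection was sent to the backend
    'processing_time': 0.042,               # seconds, as logged above
    'detected_regions': {                   # keys depend on the pipeline config
        'front': {'bbox': [120, 80, 640, 520], 'confidence': 0.88},
    },
    # Added by _execute_detection_pipeline before returning:
    'vehicle_id': 17,
    'vehicle_bbox': (100, 200, 300, 400),
    'vehicle_confidence': 0.91,
    'phase': 'detection',
}
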
async def _execute_processing_phase(self,
processing_data: Dict[str, Any],
session_id: str,
display_id: str) -> None:
"""
Execute the processing phase after receiving sessionId from backend.
This includes branch processing and database operations.
Args:
processing_data: Stored processing data from detection phase
session_id: Session ID from backend
display_id: Display identifier
"""
try:
vehicle = processing_data['vehicle']
subscription_id = processing_data['subscription_id']
detection_result = processing_data['detection_result']
logger.info(f"Executing processing phase for session {session_id}, vehicle {vehicle.track_id}")
# Capture high-quality snapshot for pipeline processing
frame = None
if self.subscription_info and self.subscription_info.stream_config.snapshot_url:
from ..streaming.readers import HTTPSnapshotReader
logger.info(f"[PROCESSING PHASE] Fetching 2K snapshot for session {session_id}")
snapshot_reader = HTTPSnapshotReader(
camera_id=self.subscription_info.camera_id,
snapshot_url=self.subscription_info.stream_config.snapshot_url,
max_retries=3
)
frame = snapshot_reader.fetch_single_snapshot()
if frame is not None:
logger.info(f"[PROCESSING PHASE] Successfully fetched {frame.shape[1]}x{frame.shape[0]} snapshot for pipeline")
else:
logger.warning(f"[PROCESSING PHASE] Failed to capture snapshot, falling back to RTSP frame")
# Fall back to RTSP frame if snapshot fails
frame = processing_data['frame']
else:
logger.warning(f"[PROCESSING PHASE] No snapshot URL available, using RTSP frame")
frame = processing_data['frame']
# Extract detected regions from detection phase result if available
detected_regions = detection_result.get('detected_regions', {})
logger.info(f"[INTEGRATION] Passing detected_regions to processing phase: {list(detected_regions.keys())}")
# Execute processing phase with detection pipeline
if self.detection_pipeline:
processing_result = await self.detection_pipeline.execute_processing_phase(
frame=frame,
display_id=display_id,
session_id=session_id,
subscription_id=subscription_id,
detected_regions=detected_regions
)
logger.info(f"Processing phase completed for session {session_id}: "
f"status={processing_result.get('status', 'unknown')}, "
f"branches={len(processing_result.get('branch_results', {}))}, "
f"actions={len(processing_result.get('actions_executed', []))}, "
f"processing_time={processing_result.get('processing_time', 0):.3f}s")
# Update stats
self.stats['pipelines_executed'] += 1
else:
logger.error("Detection pipeline not available for processing phase")
except Exception as e:
logger.error(f"Error in processing phase for session {session_id}: {e}", exc_info=True)
def set_subscription_info(self, subscription_info):
"""
Set subscription info to access snapshot URL and other stream details.
Args:
subscription_info: SubscriptionInfo object containing stream config
"""
self.subscription_info = subscription_info
logger.debug(f"Set subscription info with snapshot_url: {subscription_info.stream_config.snapshot_url if subscription_info else None}")
def set_session_id(self, display_id: str, session_id: str):
"""
@@ -393,6 +499,24 @@ class TrackingPipelineIntegration:
else:
logger.warning(f"No pending vehicle found for display {display_id} when setting session {session_id}")
# Check if we have pending processing data for this display
if display_id in self.pending_processing_data:
processing_data = self.pending_processing_data[display_id]
# Trigger the processing phase asynchronously
asyncio.create_task(self._execute_processing_phase(
processing_data=processing_data,
session_id=session_id,
display_id=display_id
))
# Remove from pending processing
del self.pending_processing_data[display_id]
logger.info(f"Triggered processing phase for session {session_id} on display {display_id}")
else:
logger.warning(f"No pending processing data found for display {display_id} when setting session {session_id}")
def clear_session_id(self, session_id: str):
"""
Clear session ID (post-fueling).
@@ -441,6 +565,7 @@ class TrackingPipelineIntegration:
self.session_vehicles.clear()
self.cleared_sessions.clear()
self.pending_vehicles.clear()
self.pending_processing_data.clear()
self.permanently_processed.clear()
self.progression_stages.clear()
self.last_detection_time.clear()
@@ -545,4 +670,9 @@ class TrackingPipelineIntegration:
"""Cleanup resources."""
self.executor.shutdown(wait=False)
self.reset_tracking()
# Cleanup detection pipeline
if self.detection_pipeline:
self.detection_pipeline.cleanup()
logger.info("Tracking pipeline integration cleaned up")