Refactor: Phase 5: Granular Refactoring

ziesorx 2025-09-12 15:39:19 +07:00
parent 54f21672aa
commit 6c7c4c5d9c
4 changed files with 1216 additions and 15 deletions


@@ -657,20 +657,167 @@ class PipelineExecutor:
                 self._execute_node_actions(node, frame, detection_result, regions_dict)
             # ─── Return detection result ────────────────────────────────
-            primary_detection = max(all_detections, key=lambda x: x["confidence"])
-            primary_bbox = primary_detection["bbox"]
-            # Add branch results and session_id to primary detection for compatibility
-            if "branch_results" in detection_result:
-                primary_detection["branch_results"] = detection_result["branch_results"]
-            if "session_id" in detection_result:
-                primary_detection["session_id"] = detection_result["session_id"]
-            return (primary_detection, primary_bbox) if return_bbox else primary_detection
+            return self._finalize_pipeline_result(detection_result, return_bbox)
         except Exception as e:
             pipeline_id = node.get("modelId", "unknown")
             raise create_pipeline_error(pipeline_id, "pipeline_execution", e)
+
+    def _initialize_pipeline_execution(self, node: Dict[str, Any], context: Optional[Dict[str, Any]]) -> PipelineContext:
+        """Initialize pipeline execution context."""
+        pipeline_context = self._extract_context(context)
+        model_id = node.get("modelId", "unknown")
+
+        if pipeline_context.backend_session_id:
+            logger.info(f"🔑 PIPELINE USING BACKEND SESSION_ID: {pipeline_context.backend_session_id} for camera {pipeline_context.camera_id}")
+
+        return pipeline_context
+
+    def _handle_classification_pipeline(
+        self,
+        frame: np.ndarray,
+        node: Dict[str, Any],
+        pipeline_context: PipelineContext,
+        return_bbox: bool
+    ) -> Optional[Union[Dict[str, Any], Tuple[Dict[str, Any], List[int]]]]:
+        """Handle classification pipeline tasks."""
+        task = getattr(node["model"], "task", None)
+        if task == "classify":
+            return self._handle_classification_task(frame, node, pipeline_context, return_bbox)
+        return None
+
+    def _check_session_state(
+        self,
+        pipeline_context: PipelineContext,
+        node: Dict[str, Any],
+        return_bbox: bool
+    ) -> Optional[Union[Dict[str, Any], Tuple[Dict[str, Any], List[int]]]]:
+        """Check session and camera activity state."""
+        model_id = node.get("modelId", "unknown")
+        return self._check_camera_active(pipeline_context.camera_id, model_id, return_bbox)
+
+    def _execute_detection_stage(
+        self,
+        frame: np.ndarray,
+        node: Dict[str, Any],
+        pipeline_context: PipelineContext,
+        validated_detection: Optional[Dict[str, Any]]
+    ) -> Optional[Tuple[List[Dict[str, Any]], Dict[str, Any], Dict[str, Any]]]:
+        """Execute the detection stage."""
+        all_detections, regions_dict, track_validation_result = self._run_detection_stage(
+            frame, node, pipeline_context, validated_detection
+        )
+        if not all_detections:
+            logger.debug("No detections from structured detection function")
+            return None
+        return all_detections, regions_dict, track_validation_result
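+
+    # A None return from _execute_detection_stage means "no detections"; the
+    # caller (not shown in this hunk) presumably maps it to the
+    # _create_none_detection sentinel defined next.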
+
+    def _create_none_detection(self, return_bbox: bool) -> Union[Dict[str, Any], Tuple[Dict[str, Any], List[int]]]:
+        """Create a 'none' detection result."""
+        none_detection = {
+            "class": "none",
+            "confidence": 1.0,
+            "bbox": [0, 0, 0, 0],
+            "branch_results": {}
+        }
+        return (none_detection, [0, 0, 0, 0]) if return_bbox else none_detection
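+
+    # The sentinel keeps the same keys as a real detection (class, confidence,
+    # bbox, branch_results) so callers can handle both shapes uniformly.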
+
+    def _validate_pipeline_execution(
+        self,
+        node: Dict[str, Any],
+        track_validation_result: Dict[str, Any],
+        regions_dict: Dict[str, Any],
+        pipeline_context: PipelineContext,
+        return_bbox: bool
+    ) -> Optional[Union[Dict[str, Any], Tuple[Dict[str, Any], List[int]], Tuple[None, None]]]:
+        """Validate pipeline execution requirements."""
+        # Track-based validation
+        tracking_validation_result = self._validate_tracking_requirements(
+            node, track_validation_result, pipeline_context, return_bbox
+        )
+        if tracking_validation_result is not None:
+            return tracking_validation_result
+
+        # Pipeline execution validation
+        pipeline_valid, missing_branches = validate_pipeline_execution(node, regions_dict)
+        if not pipeline_valid:
+            logger.error(f"Pipeline execution validation FAILED - required branches {missing_branches} cannot execute")
+            logger.error("Aborting pipeline: no Redis actions or database records will be created")
+            return (None, None) if return_bbox else None
+
+        return None
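+
+    # Shared convention for the _handle_*, _check_* and _validate_* helpers
+    # above: returning None means "no objection, continue the pipeline"; any
+    # non-None value (including the (None, None) abort tuple) is treated as
+    # the pipeline's final answer.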
+
+    def _execute_main_pipeline(
+        self,
+        frame: np.ndarray,
+        node: Dict[str, Any],
+        all_detections: List[Dict[str, Any]],
+        regions_dict: Dict[str, Any],
+        pipeline_context: PipelineContext
+    ) -> Dict[str, Any]:
+        """Execute the main pipeline with actions and branch processing."""
+        model_id = node.get("modelId", "unknown")
+
+        # Create detection result
+        detection_result = {
+            "detections": all_detections,
+            "regions": regions_dict,
+            **pipeline_context.to_dict()
+        }
+
+        # Handle database operations
+        self._handle_database_operations(node, detection_result, regions_dict, pipeline_context)
+
+        # Execute root node actions if no branches or specific model
+        if not node.get("branches") or node.get("modelId") == "yolo11n":
+            self._execute_node_actions(node, frame, detection_result, regions_dict)
+
+        # Process branches
+        branch_results = self._process_branches(frame, node, detection_result, regions_dict, pipeline_context)
+        detection_result["branch_results"] = branch_results
+
+        # Execute parallel actions
+        if node.get("parallelActions") and "branch_results" in detection_result:
+            self._execute_parallel_actions(node, frame, detection_result, regions_dict)
+
+        # Auto-enable occupancy mode after successful completion
+        occupancy_detector(pipeline_context.camera_id, model_id, enable=True)
+        logger.info(f"✅ Camera {pipeline_context.camera_id}: Pipeline completed, detection data will be sent to backend")
+        logger.info(f"🛑 Camera {pipeline_context.camera_id}: Model will stop inference for future frames")
+        logger.info(f"📡 Backend sessionId will be handled when received via WebSocket")
+
+        # Execute branch node actions
+        if node.get("actions") and regions_dict and node.get("modelId") != "yolo11n":
+            logger.debug(f"Executing post-detection actions for branch node {node.get('modelId')}")
+            self._execute_node_actions(node, frame, detection_result, regions_dict)
+
+        return detection_result
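+
+    # Note the mirrored conditions in _execute_main_pipeline: root-node actions
+    # run when there are no branches or the node is the hardcoded "yolo11n" root
+    # model, while the post-detection actions at the end run only for
+    # non-"yolo11n" branch nodes.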
+
+    def _finalize_pipeline_result(
+        self,
+        detection_result: Dict[str, Any],
+        return_bbox: bool
+    ) -> Union[Dict[str, Any], Tuple[Dict[str, Any], List[int]]]:
+        """Finalize and return the pipeline result."""
+        all_detections = detection_result.get("detections", [])
+        if not all_detections:
+            return self._create_none_detection(return_bbox)
+
+        # Get primary detection (highest confidence)
+        primary_detection = max(all_detections, key=lambda x: x["confidence"])
+        primary_bbox = primary_detection["bbox"]
+
+        # Add branch results and session_id to primary detection for compatibility
+        if "branch_results" in detection_result:
+            primary_detection["branch_results"] = detection_result["branch_results"]
+        if "session_id" in detection_result:
+            primary_detection["session_id"] = detection_result["session_id"]
+
+        return (primary_detection, primary_bbox) if return_bbox else primary_detection
 
 # Global pipeline executor instance
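
The caller that stitches these helpers together sits outside this hunk. As a rough sketch of how the extracted pieces are meant to compose (only the helper names, the exception handling, and the None-means-continue convention come from the diff; the driver's name, signature, and exact control flow are assumptions):

    # Hypothetical driver (not part of this commit) showing one plausible
    # composition of the helpers above.
    def execute_pipeline(self, frame, node, context=None, return_bbox=False):
        try:
            ctx = self._initialize_pipeline_execution(node, context)

            # Classification models short-circuit the detection flow.
            result = self._handle_classification_pipeline(frame, node, ctx, return_bbox)
            if result is not None:
                return result

            # Respect camera/session activity state before running inference.
            result = self._check_session_state(ctx, node, return_bbox)
            if result is not None:
                return result

            stage = self._execute_detection_stage(frame, node, ctx, validated_detection=None)
            if stage is None:
                return self._create_none_detection(return_bbox)
            all_detections, regions_dict, track_validation = stage

            # Validators return None to mean "proceed"; anything else is final.
            result = self._validate_pipeline_execution(
                node, track_validation, regions_dict, ctx, return_bbox
            )
            if result is not None:
                return result

            detection_result = self._execute_main_pipeline(
                frame, node, all_detections, regions_dict, ctx
            )
            return self._finalize_pipeline_result(detection_result, return_bbox)
        except Exception as e:
            raise create_pipeline_error(node.get("modelId", "unknown"), "pipeline_execution", e)

Each early-exit branch relies on the helpers' contract: a non-None value is already in the final detection (or detection, bbox) shape, so the driver can return it unchanged.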