feat/tracking and saving in Redis finished

This commit is contained in:
Pongsatorn 2025-08-21 20:59:29 +07:00
parent 3a4a27ca68
commit 5873945115
8 changed files with 393 additions and 245 deletions

View file

@ -13,6 +13,7 @@ import concurrent.futures
from ultralytics import YOLO
from urllib.parse import urlparse
from .database import DatabaseManager
from datetime import datetime
# Create a logger specifically for this module
logger = logging.getLogger("detector_worker.pympta")
@ -108,6 +109,7 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manage
"modelFile": node_config["modelFile"],
"triggerClasses": trigger_classes,
"triggerClassIndices": trigger_class_indices,
"classMapping": node_config.get("classMapping", {}),
"crop": node_config.get("crop", False),
"cropClass": node_config.get("cropClass"),
"minConfidence": node_config.get("minConfidence", None),
@ -608,8 +610,7 @@ def run_detection_with_tracking(frame, node, context=None):
)[0]
# Process detection results
all_detections = []
regions_dict = {}
candidate_detections = []
min_confidence = node.get("minConfidence", 0.0)
if res.boxes is None or len(res.boxes) == 0:
@ -618,6 +619,7 @@ def run_detection_with_tracking(frame, node, context=None):
logger.debug(f"Processing {len(res.boxes)} raw detections")
# First pass: collect all valid detections
for i, box in enumerate(res.boxes):
# Extract detection data
conf = float(box.cpu().conf[0])
@ -658,17 +660,39 @@ def run_detection_with_tracking(frame, node, context=None):
"class_id": cls_id
}
all_detections.append(detection)
logger.debug(f"Detection {i} accepted: {class_name} (conf={conf:.3f}, id={track_id}, bbox={bbox})")
# Update regions_dict with highest confidence detection per class
if class_name not in regions_dict or conf > regions_dict[class_name]["confidence"]:
regions_dict[class_name] = {
"bbox": bbox,
"confidence": conf,
"detection": detection,
"track_id": track_id
}
candidate_detections.append(detection)
logger.debug(f"Detection {i} candidate: {class_name} (conf={conf:.3f}, id={track_id}, bbox={bbox})")
# Second pass: select only the highest confidence detection overall
if not candidate_detections:
logger.debug("No valid candidate detections found")
return [], {}
# Find the single highest confidence detection across all detected classes
best_detection = max(candidate_detections, key=lambda x: x["confidence"])
original_class = best_detection["class"]
logger.info(f"Selected highest confidence detection: {original_class} (conf={best_detection['confidence']:.3f})")
# Apply class mapping if configured
mapped_class = original_class
class_mapping = node.get("classMapping", {})
if original_class in class_mapping:
mapped_class = class_mapping[original_class]
logger.info(f"Class mapping applied: {original_class} -> {mapped_class}")
# Update the detection object with mapped class
best_detection["class"] = mapped_class
best_detection["original_class"] = original_class # Keep original for reference
# Keep only the best detection with mapped class
all_detections = [best_detection]
regions_dict = {
mapped_class: {
"bbox": best_detection["bbox"],
"confidence": best_detection["confidence"],
"detection": best_detection,
"track_id": best_detection["id"]
}
}
# Multi-class validation
if node.get("multiClass", False) and node.get("expectedClasses"):
@ -964,7 +988,7 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
elif "color" in model_id:
det["color"] = class_name
execute_actions(node, frame, det)
execute_actions(node, frame, det, context.get("regions_dict") if context else None)
return (det, None) if return_bbox else det
# ─── Session management check ───────────────────────────────────────
@ -1019,13 +1043,14 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
**(context or {})
}
# ─── Create initial database record when Car+Frontal detected ────
if node.get("db_manager") and node.get("multiClass", False):
# Only create database record if we have both Car and Frontal
has_car = "Car" in regions_dict
has_frontal = "Frontal" in regions_dict
# ─── Create initial database record when valid detection found ────
if node.get("db_manager") and regions_dict:
# Create database record if we have any valid detection (after class mapping and filtering)
detected_classes = list(regions_dict.keys())
logger.debug(f"Valid detections found for database record: {detected_classes}")
if has_car and has_frontal:
# Always create record if we have valid detections that passed all filters
if detected_classes:
# Generate UUID session_id since client session is None for now
import uuid as uuid_lib
from datetime import datetime
@ -1047,9 +1072,12 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
detection_result["timestamp"] = timestamp # Update with proper timestamp
logger.info(f"Created initial database record with session_id: {inserted_session_id}")
else:
logger.debug(f"Database record not created - missing required classes. Has Car: {has_car}, Has Frontal: {has_frontal}")
logger.debug("Database record not created - no valid detections found after filtering")
execute_actions(node, frame, detection_result, regions_dict)
# Execute actions for root node only if it doesn't have branches
# Branch nodes with actions will execute them after branch processing
if not node.get("branches") or node.get("modelId") == "yolo11n":
execute_actions(node, frame, detection_result, regions_dict)
# ─── Branch processing (no stability check here) ─────────────────────────────
if node["branches"]:
@ -1089,21 +1117,28 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
futures = {}
for br in active_branches:
crop_class = br.get("cropClass", br.get("triggerClasses", [])[0] if br.get("triggerClasses") else None)
sub_frame = frame
crop_class = br.get("cropClass")
logger.info(f"Starting parallel branch: {br['modelId']}, crop_class: {crop_class}")
logger.info(f"Starting parallel branch: {br['modelId']}, cropClass: {crop_class}")
if br.get("crop", False) and crop_class:
cropped = crop_region_by_class(frame, regions_dict, crop_class)
if cropped is not None:
sub_frame = cv2.resize(cropped, (224, 224))
logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']}")
if crop_class in regions_dict:
cropped = crop_region_by_class(frame, regions_dict, crop_class)
if cropped is not None:
sub_frame = cropped # Use cropped image without manual resizing
logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']} - model will handle resizing")
else:
logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch")
continue
else:
logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch")
logger.warning(f"Crop class {crop_class} not found in detected regions for {br['modelId']}, skipping branch")
continue
future = executor.submit(run_pipeline, sub_frame, br, True, context)
# Add regions_dict to context for child branches
branch_context = dict(context) if context else {}
branch_context["regions_dict"] = regions_dict
future = executor.submit(run_pipeline, sub_frame, br, True, branch_context)
futures[future] = br
# Collect results
@ -1119,22 +1154,29 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
else:
# Run branches sequentially
for br in active_branches:
crop_class = br.get("cropClass", br.get("triggerClasses", [])[0] if br.get("triggerClasses") else None)
sub_frame = frame
crop_class = br.get("cropClass")
logger.info(f"Starting sequential branch: {br['modelId']}, crop_class: {crop_class}")
logger.info(f"Starting sequential branch: {br['modelId']}, cropClass: {crop_class}")
if br.get("crop", False) and crop_class:
cropped = crop_region_by_class(frame, regions_dict, crop_class)
if cropped is not None:
sub_frame = cv2.resize(cropped, (224, 224))
logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']}")
if crop_class in regions_dict:
cropped = crop_region_by_class(frame, regions_dict, crop_class)
if cropped is not None:
sub_frame = cropped # Use cropped image without manual resizing
logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']} - model will handle resizing")
else:
logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch")
continue
else:
logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch")
logger.warning(f"Crop class {crop_class} not found in detected regions for {br['modelId']}, skipping branch")
continue
try:
result, _ = run_pipeline(sub_frame, br, True, context)
# Add regions_dict to context for child branches
branch_context = dict(context) if context else {}
branch_context["regions_dict"] = regions_dict
result, _ = run_pipeline(sub_frame, br, True, branch_context)
if result:
branch_results[br["modelId"]] = result
logger.info(f"Branch {br['modelId']} completed: {result}")
@ -1156,6 +1198,14 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
start_cooldown_timer(camera_id, model_id)
logger.info(f"Camera {camera_id}: Pipeline completed successfully, starting 30s cooldown")
# ─── Execute actions after successful detection AND branch processing ──────────
# This ensures detection nodes (like frontal_detection_v1) execute their actions
# after completing both detection and branch processing
if node.get("actions") and regions_dict and node.get("modelId") != "yolo11n":
# Execute actions for branch detection nodes, skip root to avoid duplication
logger.debug(f"Executing post-detection actions for branch node {node.get('modelId')}")
execute_actions(node, frame, detection_result, regions_dict)
# ─── Return detection result ────────────────────────────────
primary_detection = max(all_detections, key=lambda x: x["confidence"])
primary_bbox = primary_detection["bbox"]