update tracker

Pongsatorn 2025-08-20 21:26:54 +07:00
parent a54da904f7
commit 3a4a27ca68
7 changed files with 1405 additions and 84 deletions

@@ -17,6 +17,13 @@ from .database import DatabaseManager
# Create a logger specifically for this module
logger = logging.getLogger("detector_worker.pympta")
# Global camera-aware stability tracking
# Structure: {camera_id: {model_id: {"track_stability_counters": {track_id: count}, "stable_tracks": set(), "session_state": {...}}}}
_camera_stability_tracking = {}
# Timer-based cooldown configuration (for testing)
_cooldown_duration_seconds = 30
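# Illustrative sketch (not part of this commit): what the structure above might hold at
# runtime for a hypothetical camera "cam_01" and model "car_detector". The real contents
# are built lazily by get_camera_stability_data() further down.
#
# _camera_stability_tracking = {
#     "cam_01": {
#         "car_detector": {
#             "track_stability_counters": {12: 3, 15: 1},   # track_id -> consecutive detections
#             "stable_tracks": {12},                         # track IDs past stabilityThreshold
#             "session_state": {
#                 "active": True,
#                 "cooldown_until": 0.0,
#                 "reset_tracker_on_resume": False,
#             },
#         }
#     }
# }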
def validate_redis_config(redis_config: dict) -> bool:
"""Validate Redis configuration parameters."""
required_fields = ["host", "port"]
@@ -78,7 +85,7 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manage
logger.info(f"Loading model for node {node_config['modelId']} from {model_path}")
model = YOLO(model_path)
if torch.cuda.is_available():
logger.info(f"CUDA available. Moving model {node_config['modelId']} to GPU")
logger.info(f"CUDA available. Moving model {node_config['modelId']} to GPU VRAM")
model.to("cuda")
else:
logger.info(f"CUDA not available. Using CPU for model {node_config['modelId']}")
@@ -92,6 +99,10 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manage
if name in trigger_classes]
logger.debug(f"Converted trigger classes to indices: {trigger_class_indices}")
# Extract stability threshold from tracking config
tracking_config = node_config.get("tracking", {"enabled": True, "reidConfigPath": "botsort.yaml"})
stability_threshold = tracking_config.get("stabilityThreshold", 1)
node = {
"modelId": node_config["modelId"],
"modelFile": node_config["modelFile"],
@@ -105,6 +116,8 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manage
"parallel": node_config.get("parallel", False),
"actions": node_config.get("actions", []),
"parallelActions": node_config.get("parallelActions", []),
"tracking": tracking_config,
"stabilityThreshold": stability_threshold,
"model": model,
"branches": [],
"redis_client": redis_client,
@@ -514,6 +527,342 @@ def resolve_field_mapping(value_template, branch_results, action_context):
logger.error(f"Error resolving field mapping '{value_template}': {e}")
return None
def run_detection_with_tracking(frame, node, context=None):
"""
Structured function for running YOLO detection with BoT-SORT tracking.
Args:
frame: Input frame/image
node: Pipeline node configuration with model and settings
context: Optional context information (camera info, session data, etc.)
Returns:
tuple: (all_detections, regions_dict) where:
- all_detections: List of all detection objects
- regions_dict: Dict mapping class names to highest confidence detections
Configuration options in node:
- model: YOLO model instance
- triggerClassIndices: List of class indices to detect (None for all classes)
- minConfidence: Minimum confidence threshold
- multiClass: Whether to enable multi-class detection mode
- expectedClasses: List of expected class names for multi-class validation
- tracking: Dict with tracking configuration
- enabled: Boolean to enable/disable tracking
- reidConfigPath: Path to ReID config file (default: "botsort.yaml")
"""
try:
# Extract tracking configuration
tracking_config = node.get("tracking", {})
tracking_enabled = tracking_config.get("enabled", True)
reid_config_path = tracking_config.get("reidConfigPath", "botsort.yaml")
# Check if we need to reset tracker after cooldown
camera_id = context.get("camera_id", "unknown") if context else "unknown"
model_id = node.get("modelId", "unknown")
stability_data = get_camera_stability_data(camera_id, model_id)
session_state = stability_data["session_state"]
if session_state.get("reset_tracker_on_resume", False):
# Reset YOLO tracker to get fresh track IDs
if hasattr(node["model"], 'trackers') and node["model"].trackers:
node["model"].trackers.clear() # Clear tracker state
logger.info(f"Camera {camera_id}: 🔄 Reset YOLO tracker - new cars will get fresh track IDs")
session_state["reset_tracker_on_resume"] = False # Clear the flag
# Get tracking zone from runtime context (camera-specific)
tracking_zone = context.get("trackingZone", []) if context else []
# Prepare class filtering
trigger_class_indices = node.get("triggerClassIndices")
class_filter = {"classes": trigger_class_indices} if trigger_class_indices else {}
logger.debug(f"Running detection for {node['modelId']} - tracking: {tracking_enabled}, classes: {node.get('triggerClasses', 'all')}")
if tracking_enabled and tracking_zone:
# Use tracking with zone validation
logger.debug(f"Using tracking with ReID config: {reid_config_path}")
res = node["model"].track(
frame,
stream=False,
persist=True,
tracker=reid_config_path,
**class_filter
)[0]
elif tracking_enabled:
# Use tracking without zone restriction
logger.debug("Using tracking without zone restriction")
res = node["model"].track(
frame,
stream=False,
persist=True,
**class_filter
)[0]
else:
# Use detection only (no tracking)
logger.debug("Using detection only (tracking disabled)")
res = node["model"].predict(
frame,
stream=False,
**class_filter
)[0]
# Process detection results
all_detections = []
regions_dict = {}
min_confidence = node.get("minConfidence", 0.0)
if res.boxes is None or len(res.boxes) == 0:
logger.debug("No detections found")
return [], {}
logger.debug(f"Processing {len(res.boxes)} raw detections")
for i, box in enumerate(res.boxes):
# Extract detection data
conf = float(box.cpu().conf[0])
cls_id = int(box.cpu().cls[0])
class_name = node["model"].names[cls_id]
# Apply confidence filtering
if conf < min_confidence:
logger.debug(f"Detection {i} '{class_name}' rejected: {conf:.3f} < {min_confidence}")
continue
# Extract bounding box
xy = box.cpu().xyxy[0]
x1, y1, x2, y2 = map(int, xy)
bbox = (x1, y1, x2, y2)
# Extract tracking ID if available
track_id = None
if hasattr(box, "id") and box.id is not None:
track_id = int(box.id.item())
# Apply tracking zone validation if enabled
if tracking_enabled and tracking_zone:
bbox_center_x = (x1 + x2) // 2
bbox_center_y = (y1 + y2) // 2
# Check if detection center is within tracking zone
if not _point_in_polygon((bbox_center_x, bbox_center_y), tracking_zone):
logger.debug(f"Detection {i} '{class_name}' outside tracking zone")
continue
# Create detection object
detection = {
"class": class_name,
"confidence": conf,
"id": track_id,
"bbox": bbox,
"class_id": cls_id
}
all_detections.append(detection)
logger.debug(f"Detection {i} accepted: {class_name} (conf={conf:.3f}, id={track_id}, bbox={bbox})")
# Update regions_dict with highest confidence detection per class
if class_name not in regions_dict or conf > regions_dict[class_name]["confidence"]:
regions_dict[class_name] = {
"bbox": bbox,
"confidence": conf,
"detection": detection,
"track_id": track_id
}
# Multi-class validation
if node.get("multiClass", False) and node.get("expectedClasses"):
expected_classes = node["expectedClasses"]
detected_classes = list(regions_dict.keys())
logger.debug(f"Multi-class validation: expected={expected_classes}, detected={detected_classes}")
# Check for required classes (flexible - at least one must match)
matching_classes = [cls for cls in expected_classes if cls in detected_classes]
if not matching_classes:
logger.warning(f"Multi-class validation failed: no expected classes detected")
return [], {}
logger.info(f"Multi-class validation passed: {matching_classes} detected")
logger.info(f"Detection completed: {len(all_detections)} detections, {len(regions_dict)} unique classes")
# Update stability tracking for detections with track IDs (requires camera_id from context)
camera_id = context.get("camera_id", "unknown") if context else "unknown"
update_track_stability(node, all_detections, camera_id)
return all_detections, regions_dict
except Exception as e:
logger.error(f"Error in detection_with_tracking for {node.get('modelId', 'unknown')}: {e}")
logger.debug(f"Detection error traceback: {traceback.format_exc()}")
return [], {}
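# Minimal usage sketch (assumption: `node` is the dict built by load_pipeline_node above;
# the frame path, camera id, and tracking zone are hypothetical values for illustration).
#
# import cv2
# frame = cv2.imread("sample.jpg")
# context = {"camera_id": "cam_01", "trackingZone": [(0, 0), (1280, 0), (1280, 720), (0, 720)]}
# detections, regions = run_detection_with_tracking(frame, node, context)
# for det in detections:
#     print(det["class"], det["confidence"], det["id"], det["bbox"])
# # regions maps each class name to its highest-confidence detection, e.g.
# # regions["car"]["bbox"], regions["car"]["track_id"]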
def _point_in_polygon(point, polygon):
"""Check if a point is inside a polygon using ray casting algorithm."""
if not polygon or len(polygon) < 3:
return True # No zone restriction if invalid polygon
x, y = point
n = len(polygon)
inside = False
p1x, p1y = polygon[0]
for i in range(1, n + 1):
p2x, p2y = polygon[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
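# Quick sanity-check sketch for the ray-casting helper above (illustrative values only):
#
# zone = [(100, 100), (500, 100), (500, 400), (100, 400)]
# _point_in_polygon((300, 250), zone)   # -> True, centre of the rectangle
# _point_in_polygon((50, 50), zone)     # -> False, outside the zone
# _point_in_polygon((300, 250), [])     # -> True, an empty zone means no restriction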
def get_camera_stability_data(camera_id, model_id):
"""Get or create stability tracking data for a specific camera and model."""
global _camera_stability_tracking
if camera_id not in _camera_stability_tracking:
_camera_stability_tracking[camera_id] = {}
if model_id not in _camera_stability_tracking[camera_id]:
logger.warning(f"🔄 Camera {camera_id}: Creating NEW stability data for {model_id} - this will reset any cooldown!")
_camera_stability_tracking[camera_id][model_id] = {
"track_stability_counters": {},
"stable_tracks": set(),
"session_state": {
"active": True,
"cooldown_until": 0.0,
"reset_tracker_on_resume": False
}
}
return _camera_stability_tracking[camera_id][model_id]
def update_track_stability(node, detections, camera_id):
"""Update stability counters for tracked objects per camera."""
stability_threshold = node.get("stabilityThreshold", 1)
model_id = node.get("modelId", "unknown")
# Get camera-specific stability data
stability_data = get_camera_stability_data(camera_id, model_id)
track_counters = stability_data["track_stability_counters"]
stable_tracks = stability_data["stable_tracks"]
# Get current track IDs from detections
current_track_ids = set()
for detection in detections:
track_id = detection.get("id")
if track_id is not None:
current_track_ids.add(track_id)
# Increment counter for this track
track_counters[track_id] = track_counters.get(track_id, 0) + 1
# Check if track becomes stable
if track_counters[track_id] >= stability_threshold and track_id not in stable_tracks:
stable_tracks.add(track_id)
logger.info(f"Camera {camera_id}: Track ID {track_id} became stable after {track_counters[track_id]} detections (threshold: {stability_threshold})")
# Clean up counters for tracks that disappeared
disappeared_tracks = set(track_counters.keys()) - current_track_ids
for track_id in disappeared_tracks:
logger.debug(f"Camera {camera_id}: Track ID {track_id} disappeared, removing from counters")
track_counters.pop(track_id, None)
stable_tracks.discard(track_id)
logger.debug(f"Camera {camera_id}: Track stability: active={list(current_track_ids)}, stable={list(stable_tracks)}, counters={track_counters}")
def check_stable_tracks(camera_id, model_id, regions_dict):
"""Check if any stable tracks match the detected classes for a specific camera."""
# Get camera-specific stability data
stability_data = get_camera_stability_data(camera_id, model_id)
stable_tracks = stability_data["stable_tracks"]
if not stable_tracks:
return False, []
# Check if any detection in regions_dict has a stable track ID
stable_detections = []
for class_name, region_data in regions_dict.items():
detection = region_data.get("detection", {})
track_id = detection.get("id")
if track_id is not None and track_id in stable_tracks:
stable_detections.append((class_name, track_id))
logger.debug(f"Camera {camera_id}: Found stable detection: {class_name} with stable track ID {track_id}")
has_stable_tracks = len(stable_detections) > 0
return has_stable_tracks, stable_detections
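# Illustrative call (values hypothetical): given the regions_dict returned by
# run_detection_with_tracking, this asks whether any detected class carries a stable track.
#
# has_stable, stable = check_stable_tracks("cam_01", "car_detector", regions_dict)
# # e.g. has_stable == True, stable == [("car", 12)]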
def start_cooldown_timer(camera_id, model_id):
"""Start 30-second cooldown timer after successful pipeline completion."""
stability_data = get_camera_stability_data(camera_id, model_id)
session_state = stability_data["session_state"]
# Start timer-based cooldown
cooldown_until = time.time() + _cooldown_duration_seconds
session_state["cooldown_until"] = cooldown_until
session_state["active"] = False
session_state["reset_tracker_on_resume"] = True # Flag to reset YOLO tracker
logger.info(f"Camera {camera_id}: 🛑 Starting {_cooldown_duration_seconds}s cooldown timer (until: {cooldown_until:.2f})")
# DO NOT clear tracking state here - preserve it during cooldown
# Tracking state will be cleared when cooldown expires and new session starts
def is_camera_active(camera_id, model_id):
"""Check if camera should be processing detections (timer-based cooldown)."""
stability_data = get_camera_stability_data(camera_id, model_id)
session_state = stability_data["session_state"]
# Check if cooldown timer has expired
if not session_state["active"]:
current_time = time.time()
cooldown_until = session_state["cooldown_until"]
remaining_time = cooldown_until - current_time
if current_time >= cooldown_until:
session_state["active"] = True
session_state["reset_tracker_on_resume"] = True # Ensure tracker reset flag is set
# Clear tracking state NOW - before new detection session starts
stability_data["track_stability_counters"].clear()
stability_data["stable_tracks"].clear()
logger.info(f"Camera {camera_id}: 📢 Cooldown timer ended, resuming detection with fresh track IDs")
logger.info(f"Camera {camera_id}: 🧹 Cleared stability counters and stable tracks for fresh session")
else:
logger.debug(f"Camera {camera_id}: Still in cooldown - {remaining_time:.1f}s remaining")
return session_state["active"]
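# Timeline sketch of the timer-based cooldown (illustrative, default 30 s):
#
# t = 0 s      pipeline completes -> start_cooldown_timer(): active=False,
#              cooldown_until = now + 30, reset_tracker_on_resume = True
# t = 0-30 s   is_camera_active() returns False, so run_pipeline skips detection
# t >= 30 s    is_camera_active() flips active=True and clears counters/stable tracks;
#              the next run_detection_with_tracking() call resets the YOLO tracker so
#              new cars receive fresh track IDs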
def cleanup_camera_stability(camera_id):
"""Clean up stability tracking data when a camera is disconnected, preserving cooldown timers."""
global _camera_stability_tracking
if camera_id in _camera_stability_tracking:
# Check if any models are still in cooldown before cleanup
models_in_cooldown = []
for model_id, model_data in _camera_stability_tracking[camera_id].items():
session_state = model_data.get("session_state", {})
if not session_state.get("active", True) and time.time() < session_state.get("cooldown_until", 0):
cooldown_remaining = session_state["cooldown_until"] - time.time()
models_in_cooldown.append((model_id, cooldown_remaining))
logger.warning(f"⚠️ Camera {camera_id}: Model {model_id} is in cooldown ({cooldown_remaining:.1f}s remaining) - preserving timer!")
if models_in_cooldown:
# DO NOT clear any tracking data during cooldown - preserve everything
logger.warning(f"⚠️ Camera {camera_id}: PRESERVING ALL data during cooldown - no cleanup performed!")
logger.warning(f" - Track IDs will reset only AFTER cooldown expires")
logger.warning(f" - Stability counters preserved until cooldown ends")
else:
# Safe to delete everything - no active cooldowns
del _camera_stability_tracking[camera_id]
logger.info(f"Cleaned up stability tracking data for camera {camera_id} (no active cooldowns)")
def validate_pipeline_execution(node, regions_dict):
"""
Pre-validate that all required branches will execute successfully before
@@ -618,92 +967,42 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
execute_actions(node, frame, det)
return (det, None) if return_bbox else det
# ─── Detection stage - Multi-class support ──────────────────
tk = node["triggerClassIndices"]
logger.debug(f"Running detection for node {node['modelId']} with trigger classes: {node.get('triggerClasses', [])} (indices: {tk})")
logger.debug(f"Node configuration: minConfidence={node['minConfidence']}, multiClass={node.get('multiClass', False)}")
# ─── Session management check ───────────────────────────────────────
camera_id = context.get("camera_id", "unknown") if context else "unknown"
model_id = node.get("modelId", "unknown")
res = node["model"].track(
frame,
stream=False,
persist=True,
**({"classes": tk} if tk else {})
)[0]
# Collect all detections above confidence threshold
all_detections = []
all_boxes = []
regions_dict = {}
logger.debug(f"Raw detection results from model: {len(res.boxes) if res.boxes is not None else 0} detections")
for i, box in enumerate(res.boxes):
conf = float(box.cpu().conf[0])
cid = int(box.cpu().cls[0])
name = node["model"].names[cid]
logger.debug(f"Detection {i}: class='{name}' (id={cid}), confidence={conf:.3f}, threshold={node['minConfidence']}")
if conf < node["minConfidence"]:
logger.debug(f" -> REJECTED: confidence {conf:.3f} < threshold {node['minConfidence']}")
continue
xy = box.cpu().xyxy[0]
x1, y1, x2, y2 = map(int, xy)
bbox = (x1, y1, x2, y2)
detection = {
"class": name,
"confidence": conf,
"id": box.id.item() if hasattr(box, "id") else None,
"bbox": bbox
}
all_detections.append(detection)
all_boxes.append(bbox)
logger.debug(f" -> ACCEPTED: {name} with confidence {conf:.3f}, bbox={bbox}")
# Store highest confidence detection for each class
if name not in regions_dict or conf > regions_dict[name]["confidence"]:
regions_dict[name] = {
"bbox": bbox,
"confidence": conf,
"detection": detection
}
logger.debug(f" -> Updated regions_dict['{name}'] with confidence {conf:.3f}")
logger.info(f"Detection summary: {len(all_detections)} accepted detections from {len(res.boxes) if res.boxes is not None else 0} total")
logger.info(f"Detected classes: {list(regions_dict.keys())}")
if not all_detections:
logger.warning("No detections above confidence threshold - returning null")
if not is_camera_active(camera_id, model_id):
logger.info(f"⏰ Camera {camera_id}: Tracker stopped - in cooldown period, skipping all detection")
return (None, None) if return_bbox else None
# ─── Multi-class validation ─────────────────────────────────
if node.get("multiClass", False) and node.get("expectedClasses"):
expected_classes = node["expectedClasses"]
detected_classes = list(regions_dict.keys())
# ─── Detection stage - Using structured detection function ──────────────────
all_detections, regions_dict = run_detection_with_tracking(frame, node, context)
if not all_detections:
logger.warning("No detections from structured detection function - returning null")
return (None, None) if return_bbox else None
# Extract bounding boxes for compatibility
all_boxes = [det["bbox"] for det in all_detections]
# ─── Stability validation (only for root pipeline node) ────────────────────────
stability_threshold = node.get("stabilityThreshold", 1)
if stability_threshold > 1:
# Extract camera_id for stability check
camera_id = context.get("camera_id", "unknown") if context else "unknown"
model_id = node.get("modelId", "unknown")
logger.info(f"Multi-class validation: expected={expected_classes}, detected={detected_classes}")
# Check if we have stable tracks for this specific camera
has_stable_tracks, stable_detections = check_stable_tracks(camera_id, model_id, regions_dict)
# Check if at least one expected class is detected (flexible mode)
matching_classes = [cls for cls in expected_classes if cls in detected_classes]
missing_classes = [cls for cls in expected_classes if cls not in detected_classes]
logger.debug(f"Matching classes: {matching_classes}, Missing classes: {missing_classes}")
if not matching_classes:
# No expected classes found at all
logger.warning(f"PIPELINE REJECTED: No expected classes detected. Expected: {expected_classes}, Detected: {detected_classes}")
return (None, None) if return_bbox else None
if missing_classes:
logger.info(f"Partial multi-class detection: {matching_classes} found, {missing_classes} missing")
if not has_stable_tracks:
logger.info(f"Camera {camera_id}: Track not stable yet (threshold: {stability_threshold}) - validation only, skipping branches")
# Return early with just the detection result, no branch processing
primary_detection = max(all_detections, key=lambda x: x["confidence"]) if all_detections else {"class": "none", "confidence": 0.0, "bbox": [0, 0, 0, 0]}
primary_bbox = primary_detection.get("bbox", [0, 0, 0, 0])
return (primary_detection, primary_bbox) if return_bbox else primary_detection
else:
logger.info(f"Complete multi-class detection success: {detected_classes}")
else:
logger.debug("No multi-class validation - proceeding with all detections")
logger.info(f"Camera {camera_id}: Stable tracks {[det[1] for det in stable_detections]} detected - proceeding with full pipeline")
# ─── Pre-validate pipeline execution ────────────────────────
pipeline_valid, missing_branches = validate_pipeline_execution(node, regions_dict)
@@ -752,10 +1051,14 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
execute_actions(node, frame, detection_result, regions_dict)
# ─── Parallel branch processing ─────────────────────────────
# ─── Branch processing (no stability check here) ─────────────────────────────
if node["branches"]:
branch_results = {}
# Extract camera_id for logging
camera_id = detection_result.get("camera_id", context.get("camera_id", "unknown") if context else "unknown")
# Filter branches that should be triggered
active_branches = []
for br in node["branches"]:
@@ -848,6 +1151,10 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
# ─── Execute Parallel Actions ───────────────────────────────
if node.get("parallelActions") and "branch_results" in detection_result:
execute_parallel_actions(node, frame, detection_result, regions_dict)
# ─── Start 30s cooldown timer after successful pipeline completion ─────────────────
start_cooldown_timer(camera_id, model_id)
logger.info(f"Camera {camera_id}: Pipeline completed successfully, starting 30s cooldown")
# ─── Return detection result ────────────────────────────────
primary_detection = max(all_detections, key=lambda x: x["confidence"])