Compare commits

2 commits: 0c6fc7a394 ... 2f52374ff4

| Author | SHA1 | Date |
|---|---|---|
| | 2f52374ff4 | |
| | c01f449d0d | |

2 changed files with 100 additions and 49 deletions
.gitignore (vendored): 2 additions

```diff
@@ -15,3 +15,5 @@ feeder/
 .venv/
 .vscode/
 dist/
+websocket_comm.log
+temp_debug/
```
Second changed file (a Python pipeline module):

```diff
@@ -61,12 +61,27 @@ def crop_region_by_class(frame, regions_dict, class_name):
     bbox = regions_dict[class_name]['bbox']
     x1, y1, x2, y2 = bbox
 
+    # Diagnostic logging for crop issues
+    frame_h, frame_w = frame.shape[:2]
+    logger.debug(f"CROP DEBUG: Frame dimensions: {frame_w}x{frame_h}")
+    logger.debug(f"CROP DEBUG: Original bbox: {bbox}")
+    logger.debug(f"CROP DEBUG: Bbox dimensions: {x2-x1}x{y2-y1}")
+
+    # Check if bbox is within frame bounds
+    if x1 < 0 or y1 < 0 or x2 > frame_w or y2 > frame_h:
+        logger.warning(f"CROP DEBUG: Bbox extends beyond frame! Clipping...")
+        x1, y1 = max(0, x1), max(0, y1)
+        x2, y2 = min(frame_w, x2), min(frame_h, y2)
+        logger.debug(f"CROP DEBUG: Clipped bbox: ({x1}, {y1}, {x2}, {y2})")
+
     cropped = frame[y1:y2, x1:x2]
 
     if cropped.size == 0:
         logger.warning(f"Empty crop for class '{class_name}' with bbox {bbox}")
         return None
 
+    logger.debug(f"CROP DEBUG: Successful crop shape: {cropped.shape}")
     return cropped
 
 def format_action_context(base_context, additional_context=None):
```
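The substance of this hunk is the clamp: a bbox that spills past the frame no longer yields an empty numpy slice and a `None` return, it gets clipped to the frame first. A minimal standalone sketch of the same clamp (the `clip_bbox` name is ours, not the patch's):

```python
import numpy as np

def clip_bbox(bbox, frame_shape):
    """Clamp an (x1, y1, x2, y2) box to the frame bounds, as the hunk above does."""
    frame_h, frame_w = frame_shape[:2]
    x1, y1, x2, y2 = bbox
    x1, y1 = max(0, x1), max(0, y1)              # pull the top-left corner into the frame
    x2, y2 = min(frame_w, x2), min(frame_h, y2)  # pull the bottom-right corner back in
    return x1, y1, x2, y2

frame = np.zeros((720, 1280, 3), dtype=np.uint8)     # 1280x720 dummy frame
print(clip_bbox((-20, 50, 1400, 800), frame.shape))  # -> (0, 50, 1280, 720)
```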
```diff
@@ -113,6 +128,7 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manage
         "crop": node_config.get("crop", False),
         "cropClass": node_config.get("cropClass"),
         "minConfidence": node_config.get("minConfidence", None),
+        "frontalMinConfidence": node_config.get("frontalMinConfidence", None),
         "minBboxAreaRatio": node_config.get("minBboxAreaRatio", 0.0),
         "multiClass": node_config.get("multiClass", False),
         "expectedClasses": node_config.get("expectedClasses", []),
```
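The new key rides along with the existing threshold settings, so a pipeline node for a frontal model can now carry both a general and a frontal-specific threshold. A hypothetical config fragment (key names are from the diff; the values are invented for illustration):

```python
# Hypothetical node_config for a frontal model; values are illustrative only
node_config = {
    "modelId": "frontal_detection_v1",
    "minConfidence": 0.5,           # general fallback threshold
    "frontalMinConfidence": 0.8,    # stricter threshold applied to frontal models
    "crop": True,
    "cropClass": "frontal",
}
```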
```diff
@@ -634,8 +650,7 @@ def run_detection_with_tracking(frame, node, context=None):
             logger.info(f"Camera {camera_id}: 🔄 Reset YOLO tracker - new cars will get fresh track IDs")
             session_state["reset_tracker_on_resume"] = False  # Clear the flag
 
-        # Get tracking zone from runtime context (camera-specific)
-        tracking_zone = context.get("trackingZone", []) if context else []
+        # Tracking zones removed - process all detections
 
         # Prepare class filtering
         trigger_class_indices = node.get("triggerClassIndices")
```
```diff
@@ -643,19 +658,13 @@ def run_detection_with_tracking(frame, node, context=None):
 
         logger.debug(f"Running detection for {node['modelId']} - tracking: {tracking_enabled}, stability_threshold: {stability_threshold}, classes: {node.get('triggerClasses', 'all')}")
 
-        if tracking_enabled and tracking_zone:
-            # Use tracking with zone validation
-            logger.debug(f"Using tracking with ReID config: {reid_config_path}")
-            res = node["model"].track(
-                frame,
-                stream=False,
-                persist=True,
-                tracker=reid_config_path,
-                **class_filter
-            )[0]
-        elif tracking_enabled:
-            # Use tracking without zone restriction
-            logger.debug("Using tracking without zone restriction")
+        # Use predict for detection-only models (frontal detection), track for main detection models
+        model_id = node.get("modelId", "")
+        use_tracking = tracking_enabled and not ("frontal" in model_id.lower() or "detection" in model_id.lower())
+
+        if use_tracking:
+            # Use tracking for main detection models (yolo11m, etc.)
+            logger.debug(f"Using tracking for {model_id}")
             res = node["model"].track(
                 frame,
                 stream=False,
```
```diff
@@ -663,8 +672,8 @@ def run_detection_with_tracking(frame, node, context=None):
                 **class_filter
             )[0]
         else:
-            # Use detection only (no tracking)
-            logger.debug("Using detection only (tracking disabled)")
+            # Use detection only for frontal detection and other detection-only models
+            logger.debug(f"Using prediction only for {model_id}")
             res = node["model"].predict(
                 frame,
                 stream=False,
```
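Taken together, the two hunks above replace zone-gated tracking with a name-based dispatch: any model whose `modelId` contains "frontal" or "detection" is routed to `predict()`, everything else keeps persistent `track()`. A minimal sketch of that dispatch against the Ultralytics API (the weights path and frame source are placeholders):

```python
from ultralytics import YOLO

model = YOLO("yolo11m.pt")  # placeholder weights
model_id = "yolo11m"
tracking_enabled = True

# Detection-only models (e.g. frontal detectors) are recognized by name
use_tracking = tracking_enabled and not (
    "frontal" in model_id.lower() or "detection" in model_id.lower()
)

if use_tracking:
    # Persistent tracking keeps stable track IDs across frames
    res = model.track("frame.jpg", stream=False, persist=True)[0]
else:
    # Plain prediction: no track IDs, just boxes
    res = model.predict("frame.jpg", stream=False)[0]
print(len(res.boxes))
```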
```diff
@@ -673,6 +682,12 @@ def run_detection_with_tracking(frame, node, context=None):
 
         # Process detection results
         candidate_detections = []
-        min_confidence = node.get("minConfidence", 0.0)
+        # Use frontalMinConfidence for frontal detection models, otherwise use minConfidence
+        model_id = node.get("modelId", "")
+        if "frontal" in model_id.lower() and "frontalMinConfidence" in node:
+            min_confidence = node.get("frontalMinConfidence", 0.0)
+            logger.debug(f"Using frontalMinConfidence={min_confidence} for {model_id}")
+        else:
+            min_confidence = node.get("minConfidence", 0.0)
 
         if res.boxes is None or len(res.boxes) == 0:
```
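One wrinkle worth noting: `load_pipeline_node` above stores `frontalMinConfidence` as `None` when the config omits it, so the `"frontalMinConfidence" in node` membership test passes for every loaded frontal node and `min_confidence` comes out as `None` unless the config sets the key explicitly. The selection rule in isolation (a sketch; the helper name is ours):

```python
def effective_min_confidence(node):
    """Pick the confidence threshold the way the patched code does."""
    model_id = node.get("modelId", "")
    if "frontal" in model_id.lower() and "frontalMinConfidence" in node:
        return node.get("frontalMinConfidence", 0.0)
    return node.get("minConfidence", 0.0)

print(effective_min_confidence(
    {"modelId": "frontal_detection_v1", "frontalMinConfidence": 0.8, "minConfidence": 0.5}))  # 0.8
print(effective_min_confidence(
    {"modelId": "yolo11m", "minConfidence": 0.5}))  # 0.5
```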
```diff
@@ -716,15 +731,7 @@ def run_detection_with_tracking(frame, node, context=None):
                 logger.debug(f"❌ Camera {camera_id}: Detection {i+1} REJECTED - confidence {conf:.3f} < {min_confidence}")
                 continue
 
-            # Apply tracking zone validation if enabled
-            if tracking_enabled and tracking_zone:
-                bbox_center_x = (x1 + x2) // 2
-                bbox_center_y = (y1 + y2) // 2
-
-                # Check if detection center is within tracking zone
-                if not _point_in_polygon((bbox_center_x, bbox_center_y), tracking_zone):
-                    logger.debug(f"❌ Camera {camera_id}: Detection {i+1} REJECTED - outside tracking zone")
-                    continue
+            # Tracking zone validation removed - process all detections
 
             # Create detection object
             detection = {
```
|
||||||
for i, detection in enumerate(candidate_detections):
|
for i, detection in enumerate(candidate_detections):
|
||||||
logger.debug(f"🏆 Camera {camera_id}: Candidate {i+1}: {detection['class']} conf={detection['confidence']:.3f} track_id={detection['id']}")
|
logger.debug(f"🏆 Camera {camera_id}: Candidate {i+1}: {detection['class']} conf={detection['confidence']:.3f} track_id={detection['id']}")
|
||||||
|
|
||||||
|
# Show all candidate detections before selection
|
||||||
|
logger.debug(f"Found {len(candidate_detections)} candidate detections:")
|
||||||
|
for i, det in enumerate(candidate_detections):
|
||||||
|
logger.debug(f"Candidate {i+1}: {det['class']} conf={det['confidence']:.3f} bbox={det['bbox']}")
|
||||||
|
|
||||||
# Find the single highest confidence detection across all detected classes
|
# Find the single highest confidence detection across all detected classes
|
||||||
best_detection = max(candidate_detections, key=lambda x: x["confidence"])
|
best_detection = max(candidate_detections, key=lambda x: x["confidence"])
|
||||||
original_class = best_detection["class"]
|
original_class = best_detection["class"]
|
||||||
|
```diff
@@ -808,6 +820,25 @@ def run_detection_with_tracking(frame, node, context=None):
             logger.info(f"✅ Camera {camera_id}: DETECTION COMPLETE - tracking single car: track_id={track_id}, conf={best_detection['confidence']:.3f}")
             logger.debug(f"📊 Camera {camera_id}: Detection summary: {len(res.boxes)} raw → {len(candidate_detections)} candidates → 1 selected")
 
+        # Debug: Save vehicle crop for debugging (disabled for production)
+        # if node.get("modelId") in ["yolo11n", "yolo11m"] and regions_dict:
+        #     try:
+        #         import datetime
+        #         os.makedirs("temp_debug", exist_ok=True)
+        #         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]
+        #
+        #         for class_name, region_data in regions_dict.items():
+        #             bbox = region_data['bbox']
+        #             x1, y1, x2, y2 = bbox
+        #             cropped = frame[y1:y2, x1:x2]
+        #             if cropped.size > 0:
+        #                 model_name = node.get("modelId", "yolo")
+        #                 debug_path = f"temp_debug/{model_name}_{class_name}_crop_{timestamp}.jpg"
+        #                 cv2.imwrite(debug_path, cropped)
+        #                 logger.debug(f"Saved {model_name} {class_name} crop to {debug_path}")
+        #     except Exception as e:
+        #         logger.error(f"Failed to save {node.get('modelId', 'yolo')} crop: {e}")
+
         # Update track-based stability tracking for the single selected car
         camera_id = context.get("camera_id", "unknown") if context else "unknown"
         model_id = node.get("modelId", "unknown")
```
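Both this commented-out dump block and the similar one further down in `run_pipeline` write into `temp_debug/`, the directory the `.gitignore` change at the top of this compare starts ignoring, so re-enabling them will not dirty the working tree.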
```diff
@@ -826,28 +857,6 @@ def run_detection_with_tracking(frame, node, context=None):
         logger.debug(f"Detection error traceback: {traceback.format_exc()}")
         return [], {}, {"validation_complete": False, "stable_tracks": [], "current_tracks": []}
 
-def _point_in_polygon(point, polygon):
-    """Check if a point is inside a polygon using ray casting algorithm."""
-    if not polygon or len(polygon) < 3:
-        return True  # No zone restriction if invalid polygon
-
-    x, y = point
-    n = len(polygon)
-    inside = False
-
-    p1x, p1y = polygon[0]
-    for i in range(1, n + 1):
-        p2x, p2y = polygon[i % n]
-        if y > min(p1y, p2y):
-            if y <= max(p1y, p2y):
-                if x <= max(p1x, p2x):
-                    if p1y != p2y:
-                        xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
-                    if p1x == p2x or x <= xinters:
-                        inside = not inside
-        p1x, p1y = p2x, p2y
-
-    return inside
 
 def get_camera_stability_data(camera_id, model_id):
     """Get or create stability tracking data for a specific camera and model."""
```
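The deleted `_point_in_polygon` is a textbook ray-casting test: cast a horizontal ray from the point and flip an inside/outside flag at each edge crossing, with an odd crossing count meaning inside. For reference, the removed helper behaves like this self-contained version:

```python
def point_in_polygon(point, polygon):
    """Ray-casting point-in-polygon test, matching the removed helper's behavior."""
    if not polygon or len(polygon) < 3:
        return True  # no zone restriction if the polygon is invalid
    x, y = point
    inside = False
    p1x, p1y = polygon[0]
    for i in range(1, len(polygon) + 1):
        p2x, p2y = polygon[i % len(polygon)]
        # Only edges whose y-span brackets the point can be crossed
        if min(p1y, p2y) < y <= max(p1y, p2y) and x <= max(p1x, p2x):
            # x-coordinate where the horizontal ray meets this edge
            xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
            if p1x == p2x or x <= xinters:
                inside = not inside
        p1x, p1y = p2x, p2y
    return inside

square = [(0, 0), (10, 0), (10, 10), (0, 10)]
print(point_in_polygon((5, 5), square))   # True
print(point_in_polygon((15, 5), square))  # False
```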
```diff
@@ -1424,6 +1433,46 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None, valid
         # Normal detection stage - Using structured detection function
         all_detections, regions_dict, track_validation_result = run_detection_with_tracking(frame, node, context)
 
+        # Debug: Save crops for debugging (disabled for production)
+        # if regions_dict:
+        #     try:
+        #         import datetime
+        #         os.makedirs("temp_debug", exist_ok=True)
+        #         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        #         model_id = node.get("modelId", "unknown")
+        #
+        #         # Save vehicle crop from yolo model (any vehicle: car, truck, bus, motorcycle)
+        #         if model_id in ["yolo11n", "yolo11m"]:
+        #             # Look for any vehicle class in regions_dict
+        #             vehicle_classes = ["car", "truck", "bus", "motorcycle"]
+        #             found_vehicle = None
+        #             for vehicle_class in vehicle_classes:
+        #                 if vehicle_class in regions_dict:
+        #                     found_vehicle = vehicle_class
+        #                     break
+        #
+        #             if found_vehicle:
+        #                 bbox = regions_dict[found_vehicle]['bbox']
+        #                 x1, y1, x2, y2 = bbox
+        #                 cropped = frame[y1:y2, x1:x2]
+        #                 if cropped.size > 0:
+        #                     debug_path = f"temp_debug/{found_vehicle}_crop_{timestamp}.jpg"
+        #                     cv2.imwrite(debug_path, cropped)
+        #                     logger.debug(f"Saved {found_vehicle} crop to {debug_path}")
+        #
+        #         # Save frontal crop from frontal_detection_v1
+        #         elif model_id == "frontal_detection_v1" and "frontal" in regions_dict:
+        #             bbox = regions_dict["frontal"]['bbox']
+        #             x1, y1, x2, y2 = bbox
+        #             cropped = frame[y1:y2, x1:x2]
+        #             if cropped.size > 0:
+        #                 debug_path = f"temp_debug/frontal_crop_{timestamp}.jpg"
+        #                 cv2.imwrite(debug_path, cropped)
+        #                 logger.debug(f"Saved frontal crop to {debug_path}")
+        #
+        #     except Exception as e:
+        #         logger.error(f"Failed to save crops: {e}")
+
         if not all_detections:
             logger.debug("No detections from structured detection function - sending 'none' detection")
             none_detection = {
```