fix: car detection uses wrong image source
All checks were successful
Build Worker Base and Application Images / check-base-changes (push) Successful in 6s
Build Worker Base and Application Images / build-base (push) Has been skipped
Build Worker Base and Application Images / build-docker (push) Successful in 3m34s
Build Worker Base and Application Images / deploy-stack (push) Successful in 15s

ziesorx 2025-10-20 16:54:27 +07:00
parent 5e59e00c55
commit a4cfb264b9
3 changed files with 98 additions and 34 deletions


@@ -393,7 +393,12 @@ class BranchProcessor:
             trigger_classes = getattr(branch_config, 'trigger_classes', [])
             logger.info(f"[DETECTED REGIONS] {branch_id}: Available parent detections: {list(detected_regions.keys())}")
             for region_name, region_data in detected_regions.items():
-                logger.debug(f"[REGION DATA] {branch_id}: '{region_name}' -> bbox={region_data.get('bbox')}, conf={region_data.get('confidence')}")
+                # Handle both list (new) and single dict (backward compat)
+                if isinstance(region_data, list):
+                    for i, region in enumerate(region_data):
+                        logger.debug(f"[REGION DATA] {branch_id}: '{region_name}[{i}]' -> bbox={region.get('bbox')}, conf={region.get('confidence')}")
+                else:
+                    logger.debug(f"[REGION DATA] {branch_id}: '{region_name}' -> bbox={region_data.get('bbox')}, conf={region_data.get('confidence')}")

             if trigger_classes:
                 # Check if any parent detection matches our trigger classes (case-insensitive)
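
In plain terms, the logging change above normalizes whatever is stored per class before iterating: values may now be lists of detections (new format) or single dicts (old format). A minimal standalone sketch of that normalization, with illustrative sample data and a hypothetical helper name:

    from typing import Any, Iterator

    def iter_regions(region_data: Any) -> Iterator[dict]:
        """Yield region dicts whether the stored value is a list (new) or a single dict (old)."""
        if isinstance(region_data, list):
            yield from region_data
        else:
            yield region_data

    detected_regions = {
        'car': [{'bbox': [10, 10, 200, 120], 'confidence': 0.91},
                {'bbox': [220, 30, 480, 260], 'confidence': 0.88}],      # new list shape
        'front_rear': {'bbox': [40, 50, 180, 110], 'confidence': 0.76},  # old dict shape
    }

    for name, data in detected_regions.items():
        for region in iter_regions(data):
            print(name, region['bbox'], region['confidence'])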
@@ -454,18 +459,24 @@ class BranchProcessor:
                 for crop_class in crop_classes:
                     if crop_class in detected_regions:
-                        region = detected_regions[crop_class]
-                        confidence = region.get('confidence', 0.0)
-
-                        # Select largest bbox (no confidence filtering - parent already validated it)
-                        bbox = region['bbox']
-                        area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])  # width * height
-
-                        # Choose biggest bbox among available detections
-                        if area > best_area:
-                            best_region = region
-                            best_class = crop_class
-                            best_area = area
+                        regions = detected_regions[crop_class]
+
+                        # Handle both list (new) and single dict (backward compat)
+                        if not isinstance(regions, list):
+                            regions = [regions]
+
+                        # Find largest bbox from all detections of this class
+                        for region in regions:
+                            confidence = region.get('confidence', 0.0)
+                            bbox = region['bbox']
+                            area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])  # width * height
+
+                            # Choose biggest bbox among all available detections
+                            if area > best_area:
+                                best_region = region
+                                best_class = crop_class
+                                best_area = area
+                                logger.debug(f"[CROP] Selected larger bbox for '{crop_class}': area={area:.0f}px², conf={confidence:.3f}")

                 if best_region:
                     bbox = best_region['bbox']
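
The crop-selection rewrite above picks the detection with the largest pixel area and deliberately ignores confidence, since the parent stage already applied its confidence threshold. A hedged sketch of that criterion in isolation (the helper name and sample values are made up):

    def bbox_area(region: dict) -> float:
        """Area in px² of an [x1, y1, x2, y2] bounding box."""
        x1, y1, x2, y2 = region['bbox']
        return (x2 - x1) * (y2 - y1)

    regions = [
        {'bbox': [0, 0, 100, 100], 'confidence': 0.95},  # 10,000 px²
        {'bbox': [0, 0, 300, 200], 'confidence': 0.80},  # 60,000 px², selected despite lower conf
    ]
    best = max(regions, key=bbox_area)
    assert best['confidence'] == 0.80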
@@ -483,7 +494,6 @@ class BranchProcessor:
             logger.info(f"[INFERENCE START] {branch_id}: Running inference on {'cropped' if input_frame is not frame else 'full'} frame "
                         f"({input_frame.shape[1]}x{input_frame.shape[0]}) with confidence={min_confidence}")
-

             # Use .predict() method for both detection and classification models
             inference_start = time.time()
             detection_results = model.model.predict(input_frame, conf=min_confidence, verbose=False)
@@ -690,10 +700,26 @@ class BranchProcessor:
             bbox = None
             if region_name and region_name in detected_regions:
                 # Crop the specified region
-                bbox = detected_regions[region_name]['bbox']
+                # Handle both list (new) and single dict (backward compat)
+                regions = detected_regions[region_name]
+                if isinstance(regions, list):
+                    # Multiple detections - select largest bbox
+                    if regions:
+                        best_region = max(regions, key=lambda r: (r['bbox'][2] - r['bbox'][0]) * (r['bbox'][3] - r['bbox'][1]))
+                        bbox = best_region['bbox']
+                else:
+                    bbox = regions['bbox']
             elif region_name and region_name.lower() == 'frontal' and 'front_rear' in detected_regions:
                 # Special case: "frontal" region maps to "front_rear" detection
-                bbox = detected_regions['front_rear']['bbox']
+                # Handle both list (new) and single dict (backward compat)
+                regions = detected_regions['front_rear']
+                if isinstance(regions, list):
+                    # Multiple detections - select largest bbox
+                    if regions:
+                        best_region = max(regions, key=lambda r: (r['bbox'][2] - r['bbox'][0]) * (r['bbox'][3] - r['bbox'][1]))
+                        bbox = best_region['bbox']
+                else:
+                    bbox = regions['bbox']

             if bbox is not None:
                 x1, y1, x2, y2 = [int(coord) for coord in bbox]
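
Note that the `region_name` branch and the `front_rear` special case above now duplicate the same select-largest logic. A shared helper would keep the two branches in sync; a hypothetical refactor sketch, not part of this commit:

    def largest_bbox(regions):
        """Return the bbox of the largest detection.

        Accepts either a list of region dicts (new format) or a single dict
        (backward compat); returns None for an empty list. Hypothetical helper.
        """
        if isinstance(regions, list):
            if not regions:
                return None
            best = max(regions, key=lambda r: (r['bbox'][2] - r['bbox'][0]) * (r['bbox'][3] - r['bbox'][1]))
            return best['bbox']
        return regions['bbox']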


@@ -495,11 +495,13 @@ class DetectionPipeline:
                             }
                             valid_detections.append(detection_info)

-                            # Store region for processing phase
-                            detected_regions[class_name] = {
+                            # Store region for processing phase (support multiple detections per class)
+                            if class_name not in detected_regions:
+                                detected_regions[class_name] = []
+                            detected_regions[class_name].append({
                                 'bbox': bbox,
                                 'confidence': confidence
-                            }
+                            })

             else:
                 logger.warning("[DETECTION PHASE] No boxes found in detection results")
@@ -951,14 +953,26 @@ class DetectionPipeline:
                     if region_name and region_name in detected_regions:
                         # Crop the specified region
-                        bbox = detected_regions[region_name]['bbox']
-                        x1, y1, x2, y2 = [int(coord) for coord in bbox]
-                        cropped = frame[y1:y2, x1:x2]
-                        if cropped.size > 0:
-                            image_to_save = cropped
-                            logger.debug(f"Cropped region '{region_name}' for redis_save_image")
-                        else:
-                            logger.warning(f"Empty crop for region '{region_name}', using full frame")
+                        # Handle both list (new) and single dict (backward compat)
+                        regions = detected_regions[region_name]
+                        if isinstance(regions, list):
+                            # Multiple detections - select largest bbox
+                            if regions:
+                                best_region = max(regions, key=lambda r: (r['bbox'][2] - r['bbox'][0]) * (r['bbox'][3] - r['bbox'][1]))
+                                bbox = best_region['bbox']
+                            else:
+                                bbox = None
+                        else:
+                            bbox = regions['bbox']
+
+                        if bbox:
+                            x1, y1, x2, y2 = [int(coord) for coord in bbox]
+                            cropped = frame[y1:y2, x1:x2]
+                            if cropped.size > 0:
+                                image_to_save = cropped
+                                logger.debug(f"Cropped region '{region_name}' for redis_save_image")
+                            else:
+                                logger.warning(f"Empty crop for region '{region_name}', using full frame")

                 # Format key with context
                 key = action.params['key'].format(**context)
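
Taken together, the redis_save_image path now crops the largest detected region rather than whichever detection happened to be stored last. A self-contained sketch of that crop step, with NumPy standing in for a video frame (shapes and values are illustrative):

    import numpy as np

    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a video frame
    regions = [{'bbox': [10, 10, 60, 40], 'confidence': 0.9},
               {'bbox': [100, 50, 400, 300], 'confidence': 0.8}]  # larger area, chosen

    best = max(regions, key=lambda r: (r['bbox'][2] - r['bbox'][0]) * (r['bbox'][3] - r['bbox'][1]))
    x1, y1, x2, y2 = [int(c) for c in best['bbox']]
    cropped = frame[y1:y2, x1:x2]
    if cropped.size > 0:
        image_to_save = cropped  # 250x300 crop of the largest region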