fix: call detection model directly instead of via .predict(), using minConfidence from pipeline.json
All checks were successful
Build Worker Base and Application Images / check-base-changes (push) Successful in 8s
Build Worker Base and Application Images / build-base (push) Has been skipped
Build Worker Base and Application Images / build-docker (push) Successful in 2m44s
Build Worker Base and Application Images / deploy-stack (push) Successful in 9s

This commit is contained in:
ziesorx 2025-09-25 15:06:41 +07:00
parent 5bb68b6e10
commit 2e5316ca01
3 changed files with 82 additions and 33 deletions

View file

@@ -133,32 +133,43 @@ class DetectionPipeline:
async def _initialize_detection_model(self) -> bool:
"""
Load and initialize the main detection model.
Load and initialize the main detection model from pipeline.json configuration.
Returns:
True if successful, False otherwise
"""
try:
if not self.pipeline_config:
logger.warning("No pipeline configuration found")
logger.error("No pipeline configuration found - cannot initialize detection model")
return False
model_file = getattr(self.pipeline_config, 'model_file', None)
model_id = getattr(self.pipeline_config, 'model_id', None)
min_confidence = getattr(self.pipeline_config, 'min_confidence', 0.6)
trigger_classes = getattr(self.pipeline_config, 'trigger_classes', [])
crop = getattr(self.pipeline_config, 'crop', False)
if not model_file:
logger.warning("No detection model file specified")
logger.error("No detection model file specified in pipeline configuration")
return False
# Load detection model
logger.info(f"Loading detection model: {model_id} ({model_file})")
# Log complete pipeline configuration for main detection model
logger.info(f"[MAIN MODEL CONFIG] Initializing from pipeline.json:")
logger.info(f"[MAIN MODEL CONFIG] modelId: {model_id}")
logger.info(f"[MAIN MODEL CONFIG] modelFile: {model_file}")
logger.info(f"[MAIN MODEL CONFIG] minConfidence: {min_confidence}")
logger.info(f"[MAIN MODEL CONFIG] triggerClasses: {trigger_classes}")
logger.info(f"[MAIN MODEL CONFIG] crop: {crop}")
# Load detection model using model manager
logger.info(f"[MAIN MODEL LOADING] Loading {model_file} from model directory {self.model_id}")
self.detection_model = self.model_manager.get_yolo_model(self.model_id, model_file)
if not self.detection_model:
logger.error(f"Failed to load detection model {model_file} from model {self.model_id}")
logger.error(f"[MAIN MODEL ERROR] Failed to load detection model {model_file} from model {self.model_id}")
return False
self.detection_model_id = model_id
logger.info(f"Detection model {model_id} loaded successfully")
logger.info(f"[MAIN MODEL SUCCESS] Detection model {model_id} ({model_file}) loaded successfully")
return True
except Exception as e:
@@ -462,10 +473,13 @@ class DetectionPipeline:
'timestamp_ms': int(time.time() * 1000)
}
# Run inference on single snapshot using .predict() method
detection_results = self.detection_model.model.predict(
# Run inference using direct model call (like ML engineer's approach)
# Use minConfidence from pipeline.json configuration
model_confidence = getattr(self.pipeline_config, 'min_confidence', 0.6)
logger.info(f"[DETECTION PHASE] Running {self.pipeline_config.model_id} with conf={model_confidence} (from pipeline.json)")
detection_results = self.detection_model.model(
frame,
conf=getattr(self.pipeline_config, 'min_confidence', 0.6),
conf=model_confidence,
verbose=False
)
@@ -477,7 +491,7 @@ class DetectionPipeline:
result_obj = detection_results[0]
trigger_classes = getattr(self.pipeline_config, 'trigger_classes', [])
# Handle .predict() results which have .boxes for detection models
# Handle direct model call results which have .boxes for detection models
if hasattr(result_obj, 'boxes') and result_obj.boxes is not None:
logger.info(f"[DETECTION PHASE] Found {len(result_obj.boxes)} raw detections from {getattr(self.pipeline_config, 'model_id', 'unknown')}")
@@ -586,10 +600,13 @@ class DetectionPipeline:
# If no detected_regions provided, re-run detection to get them
if not detected_regions:
# Use .predict() method for detection
detection_results = self.detection_model.model.predict(
# Use direct model call for detection (like ML engineer's approach)
# Use minConfidence from pipeline.json configuration
model_confidence = getattr(self.pipeline_config, 'min_confidence', 0.6)
logger.info(f"[PROCESSING PHASE] Re-running {self.pipeline_config.model_id} with conf={model_confidence} (from pipeline.json)")
detection_results = self.detection_model.model(
frame,
conf=getattr(self.pipeline_config, 'min_confidence', 0.6),
conf=model_confidence,
verbose=False
)
@@ -742,10 +759,13 @@ class DetectionPipeline:
}
# Run inference on single snapshot using .predict() method
detection_results = self.detection_model.model.predict(
# Run inference using direct model call (like ML engineer's approach)
# Use minConfidence from pipeline.json configuration
model_confidence = getattr(self.pipeline_config, 'min_confidence', 0.6)
logger.info(f"[PIPELINE EXECUTE] Running {self.pipeline_config.model_id} with conf={model_confidence} (from pipeline.json)")
detection_results = self.detection_model.model(
frame,
conf=getattr(self.pipeline_config, 'min_confidence', 0.6),
conf=model_confidence,
verbose=False
)
@@ -757,7 +777,7 @@ class DetectionPipeline:
result_obj = detection_results[0]
trigger_classes = getattr(self.pipeline_config, 'trigger_classes', [])
# Handle .predict() results which have .boxes for detection models
# Handle direct model call results which have .boxes for detection models
if hasattr(result_obj, 'boxes') and result_obj.boxes is not None:
logger.info(f"[PIPELINE RAW] Found {len(result_obj.boxes)} raw detections from {getattr(self.pipeline_config, 'model_id', 'unknown')}")