fix: model calling method
All checks were successful
Build Worker Base and Application Images / check-base-changes (push) Successful in 8s
Build Worker Base and Application Images / build-base (push) Has been skipped
Build Worker Base and Application Images / build-docker (push) Successful in 2m44s
Build Worker Base and Application Images / deploy-stack (push) Successful in 9s
parent 5bb68b6e10
commit 2e5316ca01
3 changed files with 82 additions and 33 deletions
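The change switches inference from an unconditional model.model.predict(...) to a split calling convention: detection models are invoked directly as model(frame, conf=...), while classification models go through model.predict(source=...). In recent Ultralytics releases the direct call delegates to .predict() under the hood, so the practical difference is which arguments each path passes (the classification path drops conf). A minimal sketch of the two conventions, assuming an Ultralytics install; the weight files and frame are placeholders, not this repo's actual models:

    import numpy as np
    from ultralytics import YOLO

    frame = np.zeros((640, 640, 3), dtype=np.uint8)  # placeholder BGR frame

    # Detection: direct call, confidence threshold filters the boxes
    det_model = YOLO("yolov8n.pt")  # hypothetical weights
    det_results = det_model(frame, conf=0.6, verbose=False)
    print(det_results[0].boxes)  # Boxes object (may be empty)

    # Classification: .predict() with an explicit source, results carry .probs
    cls_model = YOLO("yolov8n-cls.pt", task="classify")  # hypothetical weights
    cls_results = cls_model.predict(source=frame, verbose=False)
    print(cls_results[0].probs.top5)  # top-5 class indices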
@@ -438,11 +438,22 @@ class BranchProcessor:
                         f"({input_frame.shape[1]}x{input_frame.shape[0]}) with confidence={min_confidence}")
 
-            # Use .predict() method for both detection and classification models
+            # Determine model type and use appropriate calling method (like ML engineer's approach)
             inference_start = time.time()
-            detection_results = model.model.predict(input_frame, conf=min_confidence, verbose=False)
+
+            # Check if this is a classification model based on filename or model structure
+            is_classification = 'cls' in branch_id.lower() or 'classify' in branch_id.lower()
+
+            if is_classification:
+                # Use .predict() method for classification models (like ML engineer's classification_test.py)
+                detection_results = model.model.predict(source=input_frame, verbose=False)
+                logger.info(f"[INFERENCE DONE] {branch_id}: Classification completed in {time.time() - inference_start:.3f}s using .predict()")
+            else:
+                # Use direct model call for detection models (like ML engineer's detection_test.py)
+                detection_results = model.model(input_frame, conf=min_confidence, verbose=False)
+                logger.info(f"[INFERENCE DONE] {branch_id}: Detection completed in {time.time() - inference_start:.3f}s using direct call")
+
             inference_time = time.time() - inference_start
-            logger.info(f"[INFERENCE DONE] {branch_id}: Predict completed in {inference_time:.3f}s using .predict() method")
 
             # Initialize branch_detections outside the conditional
             branch_detections = []
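Two things worth noting in this hunk: the model type is inferred from the branch name rather than from the model file, and the classification path no longer passes conf=min_confidence. A sketch of the name-based heuristic, with hypothetical branch IDs:

    def is_classification_branch(branch_id: str) -> bool:
        # Mirrors the heuristic above: 'cls' or 'classify' anywhere in the branch ID
        lowered = branch_id.lower()
        return 'cls' in lowered or 'classify' in lowered

    assert is_classification_branch("car_brand_cls_v1")         # hypothetical ID
    assert not is_classification_branch("front_rear_detector")  # hypothetical ID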
@@ -648,17 +659,11 @@ class BranchProcessor:
             # Format key with context
             key = action.params['key'].format(**context)
 
-            # Convert image to bytes
+            # Get image format parameters
             import cv2
             image_format = action.params.get('format', 'jpeg')
             quality = action.params.get('quality', 90)
 
-            if image_format.lower() == 'jpeg':
-                encode_param = [cv2.IMWRITE_JPEG_QUALITY, quality]
-                _, image_bytes = cv2.imencode('.jpg', image_to_save, encode_param)
-            else:
-                _, image_bytes = cv2.imencode('.png', image_to_save)
-
             # Save to Redis synchronously using a sync Redis client
             try:
                 import redis
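The inline JPEG/PNG encoding is dropped here while the format parameters are still read, so the encode presumably moves inside the Redis save path, which this capture truncates. For reference, cv2.imencode returns a (success, buffer) pair, and the buffer's tobytes() is what a Redis SET expects; a minimal sketch with a placeholder frame:

    import cv2
    import numpy as np

    image_to_save = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder frame
    ok, buf = cv2.imencode('.jpg', image_to_save, [cv2.IMWRITE_JPEG_QUALITY, 90])
    if ok:
        image_bytes = buf.tobytes()  # suitable for redis_client.set(key, image_bytes)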
@@ -133,32 +133,43 @@ class DetectionPipeline:
 
     async def _initialize_detection_model(self) -> bool:
         """
-        Load and initialize the main detection model.
+        Load and initialize the main detection model from pipeline.json configuration.
 
         Returns:
             True if successful, False otherwise
         """
         try:
             if not self.pipeline_config:
-                logger.warning("No pipeline configuration found")
+                logger.error("No pipeline configuration found - cannot initialize detection model")
                 return False
 
             model_file = getattr(self.pipeline_config, 'model_file', None)
             model_id = getattr(self.pipeline_config, 'model_id', None)
+            min_confidence = getattr(self.pipeline_config, 'min_confidence', 0.6)
+            trigger_classes = getattr(self.pipeline_config, 'trigger_classes', [])
+            crop = getattr(self.pipeline_config, 'crop', False)
 
             if not model_file:
-                logger.warning("No detection model file specified")
+                logger.error("No detection model file specified in pipeline configuration")
                 return False
 
-            # Load detection model
-            logger.info(f"Loading detection model: {model_id} ({model_file})")
+            # Log complete pipeline configuration for main detection model
+            logger.info(f"[MAIN MODEL CONFIG] Initializing from pipeline.json:")
+            logger.info(f"[MAIN MODEL CONFIG] modelId: {model_id}")
+            logger.info(f"[MAIN MODEL CONFIG] modelFile: {model_file}")
+            logger.info(f"[MAIN MODEL CONFIG] minConfidence: {min_confidence}")
+            logger.info(f"[MAIN MODEL CONFIG] triggerClasses: {trigger_classes}")
+            logger.info(f"[MAIN MODEL CONFIG] crop: {crop}")
+
+            # Load detection model using model manager
+            logger.info(f"[MAIN MODEL LOADING] Loading {model_file} from model directory {self.model_id}")
             self.detection_model = self.model_manager.get_yolo_model(self.model_id, model_file)
             if not self.detection_model:
-                logger.error(f"Failed to load detection model {model_file} from model {self.model_id}")
+                logger.error(f"[MAIN MODEL ERROR] Failed to load detection model {model_file} from model {self.model_id}")
                 return False
 
             self.detection_model_id = model_id
-            logger.info(f"Detection model {model_id} loaded successfully")
+            logger.info(f"[MAIN MODEL SUCCESS] Detection model {model_id} ({model_file}) loaded successfully")
             return True
 
         except Exception as e:
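The getattr reads and the camelCase log labels suggest a pipeline.json whose top level carries these five fields. A hypothetical example, written as a Python dict; the values are illustrative and the real schema is not shown in this diff:

    # Hypothetical pipeline.json contents matching the fields read above
    pipeline_json = {
        "modelId": "front_detect_v1",        # -> model_id
        "modelFile": "front_detect.pt",      # -> model_file
        "minConfidence": 0.6,                # -> min_confidence (defaults to 0.6)
        "triggerClasses": ["car", "truck"],  # -> trigger_classes (defaults to [])
        "crop": False,                       # -> crop (defaults to False)
    }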
@@ -462,10 +473,13 @@ class DetectionPipeline:
                 'timestamp_ms': int(time.time() * 1000)
             }
 
-            # Run inference on single snapshot using .predict() method
-            detection_results = self.detection_model.model.predict(
+            # Run inference using direct model call (like ML engineer's approach)
+            # Use minConfidence from pipeline.json configuration
+            model_confidence = getattr(self.pipeline_config, 'min_confidence', 0.6)
+            logger.info(f"[DETECTION PHASE] Running {self.pipeline_config.model_id} with conf={model_confidence} (from pipeline.json)")
+            detection_results = self.detection_model.model(
                 frame,
-                conf=getattr(self.pipeline_config, 'min_confidence', 0.6),
+                conf=model_confidence,
                 verbose=False
             )
 
@@ -477,7 +491,7 @@ class DetectionPipeline:
             result_obj = detection_results[0]
             trigger_classes = getattr(self.pipeline_config, 'trigger_classes', [])
 
-            # Handle .predict() results which have .boxes for detection models
+            # Handle direct model call results which have .boxes for detection models
             if hasattr(result_obj, 'boxes') and result_obj.boxes is not None:
                 logger.info(f"[DETECTION PHASE] Found {len(result_obj.boxes)} raw detections from {getattr(self.pipeline_config, 'model_id', 'unknown')}")
 
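Each entry in result_obj.boxes carries xyxy coordinates, a confidence, and a class index. A sketch of the kind of extraction loop that typically follows this guard (assuming Ultralytics result objects; the dict layout is illustrative):

    branch_detections = []
    for box in result_obj.boxes:
        x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()    # corner coordinates
        conf = float(box.conf[0])                      # detection confidence
        cls_name = result_obj.names[int(box.cls[0])]   # class index -> label
        if not trigger_classes or cls_name in trigger_classes:
            branch_detections.append({'bbox': [x1, y1, x2, y2],
                                      'confidence': conf, 'class': cls_name})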
@@ -586,10 +600,13 @@ class DetectionPipeline:
 
             # If no detected_regions provided, re-run detection to get them
             if not detected_regions:
-                # Use .predict() method for detection
-                detection_results = self.detection_model.model.predict(
+                # Use direct model call for detection (like ML engineer's approach)
+                # Use minConfidence from pipeline.json configuration
+                model_confidence = getattr(self.pipeline_config, 'min_confidence', 0.6)
+                logger.info(f"[PROCESSING PHASE] Re-running {self.pipeline_config.model_id} with conf={model_confidence} (from pipeline.json)")
+                detection_results = self.detection_model.model(
                     frame,
-                    conf=getattr(self.pipeline_config, 'min_confidence', 0.6),
+                    conf=model_confidence,
                     verbose=False
                 )
 
@@ -742,10 +759,13 @@ class DetectionPipeline:
             }
 
 
-            # Run inference on single snapshot using .predict() method
-            detection_results = self.detection_model.model.predict(
+            # Run inference using direct model call (like ML engineer's approach)
+            # Use minConfidence from pipeline.json configuration
+            model_confidence = getattr(self.pipeline_config, 'min_confidence', 0.6)
+            logger.info(f"[PIPELINE EXECUTE] Running {self.pipeline_config.model_id} with conf={model_confidence} (from pipeline.json)")
+            detection_results = self.detection_model.model(
                 frame,
-                conf=getattr(self.pipeline_config, 'min_confidence', 0.6),
+                conf=model_confidence,
                 verbose=False
             )
 
@@ -757,7 +777,7 @@ class DetectionPipeline:
             result_obj = detection_results[0]
             trigger_classes = getattr(self.pipeline_config, 'trigger_classes', [])
 
-            # Handle .predict() results which have .boxes for detection models
+            # Handle direct model call results which have .boxes for detection models
             if hasattr(result_obj, 'boxes') and result_obj.boxes is not None:
                 logger.info(f"[PIPELINE RAW] Found {len(result_obj.boxes)} raw detections from {getattr(self.pipeline_config, 'model_id', 'unknown')}")
 
@@ -81,8 +81,28 @@ class YOLOWrapper:
             from ultralytics import YOLO
 
             logger.info(f"Loading YOLO model from {self.model_path}")
+
+            # Load model normally first
             self.model = YOLO(str(self.model_path))
+
+            # Determine if this is a classification model based on filename or model structure
+            # Classification models typically have 'cls' in filename
+            is_classification = 'cls' in str(self.model_path).lower()
+
+            # For classification models, create a separate instance with task parameter
+            if is_classification:
+                try:
+                    # Reload with classification task (like ML engineer's approach)
+                    self.model = YOLO(str(self.model_path), task="classify")
+                    logger.info(f"Loaded classification model {self.model_id} with task='classify'")
+                except Exception as e:
+                    logger.warning(f"Failed to load with task='classify', using default: {e}")
+                    # Fall back to regular loading
+                    self.model = YOLO(str(self.model_path))
+                    logger.info(f"Loaded model {self.model_id} with default task")
+            else:
+                logger.info(f"Loaded detection model {self.model_id}")
 
             # Move model to device
             if self.device == 'cuda' and torch.cuda.is_available():
                 self.model.to('cuda')
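task="classify" is a real Ultralytics constructor argument: it pins the task instead of letting it be inferred from the checkpoint, which matters for checkpoints whose task metadata is missing or wrong. A sketch of the load-with-fallback pattern above, with a hypothetical weights path:

    from ultralytics import YOLO

    model_path = "models/brand_cls.pt"  # hypothetical path
    try:
        model = YOLO(model_path, task="classify")  # pin the task explicitly
    except Exception:
        model = YOLO(model_path)  # fall back to task inference from the checkpoint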
@@ -141,7 +161,7 @@ class YOLOWrapper:
             import time
             start_time = time.time()
 
-            # Run inference
+            # Run inference using direct model call (like ML engineer's approach)
             results = self.model(
                 image,
                 conf=confidence_threshold,
@@ -291,11 +311,11 @@ class YOLOWrapper:
             raise RuntimeError(f"Model {self.model_id} not loaded")
 
         try:
-            # Run inference
-            results = self.model(image, verbose=False)
+            # Run inference using predict method for classification (like ML engineer's approach)
+            results = self.model.predict(source=image, verbose=False)
 
             # For classification models, extract probabilities
-            if hasattr(results[0], 'probs'):
+            if results and len(results) > 0 and hasattr(results[0], 'probs') and results[0].probs is not None:
                 probs = results[0].probs
                 top_indices = probs.top5[:top_k]
                 top_conf = probs.top5conf[:top_k].cpu().numpy()
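With the guarded results[0].probs in hand, the top-k labels and scores come from the top5/top5conf attributes. A sketch of how the predictions dict returned below is typically assembled (class-name lookup via the Results object's names mapping is assumed):

    probs = results[0].probs
    top_indices = probs.top5[:top_k]                 # best class indices
    top_conf = probs.top5conf[:top_k].cpu().numpy()  # matching confidences
    predictions = {
        results[0].names[int(idx)]: float(conf)
        for idx, conf in zip(top_indices, top_conf)
    }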
@@ -307,7 +327,7 @@ class YOLOWrapper:
 
                 return predictions
             else:
-                logger.warning(f"Model {self.model_id} does not support classification")
+                logger.warning(f"Model {self.model_id} does not support classification or no probs found")
                 return {}
 
         except Exception as e:
@@ -350,6 +370,10 @@ class YOLOWrapper:
         """Get the number of classes the model can detect"""
         return len(self._class_names)
 
+    def is_classification_model(self) -> bool:
+        """Check if this is a classification model"""
+        return 'cls' in str(self.model_path).lower() or 'classify' in str(self.model_path).lower()
+
     def clear_cache(self) -> None:
         """Clear the model cache"""
         with self._cache_lock:
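The new helper lets callers branch on model type without repeating the filename heuristic. A hedged usage sketch, assuming a model_manager in scope as in DetectionPipeline and using only names that appear in this diff:

    import numpy as np

    image = np.zeros((224, 224, 3), dtype=np.uint8)  # placeholder frame
    wrapper = model_manager.get_yolo_model(model_id, model_file)  # accessor from this diff
    if wrapper.is_classification_model():
        results = wrapper.model.predict(source=image, verbose=False)  # classification path
    else:
        results = wrapper.model(image, conf=0.6, verbose=False)       # detection path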