diff --git a/.gitignore b/.gitignore index c990ddb..8014610 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,5 @@ no_frame_debug.log feeder/ .venv/ +.vscode/ +dist/ diff --git a/app.py b/app.py index 7cd0407..4c15324 100644 --- a/app.py +++ b/app.py @@ -17,6 +17,7 @@ import ssl import urllib3 import subprocess import tempfile +import redis from urllib.parse import urlparse from requests.adapters import HTTPAdapter from urllib3.util.ssl_ import create_urllib3_context @@ -27,7 +28,7 @@ from websockets.exceptions import ConnectionClosedError from ultralytics import YOLO # Import shared pipeline functions -from siwatsystem.pympta import load_pipeline_from_zip, run_pipeline +from siwatsystem.pympta import load_pipeline_from_zip, run_pipeline, cleanup_camera_stability app = FastAPI() @@ -43,6 +44,20 @@ camera_streams: Dict[str, Dict[str, Any]] = {} subscription_to_camera: Dict[str, str] = {} # Store latest frames for REST API access (separate from processing buffer) latest_frames: Dict[str, Any] = {} +# Store cached detection dict after successful pipeline completion +cached_detections: Dict[str, Dict[str, Any]] = {} +# Enhanced caching system for LPR integration +session_detections: Dict[str, Dict[str, Any]] = {} # session_id -> detection data +session_to_camera: Dict[str, str] = {} # session_id -> camera_id +detection_timestamps: Dict[str, float] = {} # session_id -> timestamp (for cleanup) +# Track frame skipping for pipeline buffer after detection +frame_skip_flags: Dict[str, bool] = {} +# Track camera connection states for immediate error handling +camera_states: Dict[str, Dict[str, Any]] = {} +# Track session ID states and pipeline modes per camera +session_pipeline_states: Dict[str, Dict[str, Any]] = {} +# Store full pipeline results for caching +cached_full_pipeline_results: Dict[str, Dict[str, Any]] = {} with open("config.json", "r") as f: config = json.load(f) @@ -69,11 +84,22 @@ logging.basicConfig( logger = logging.getLogger("detector_worker") logger.setLevel(logging.DEBUG) # Set app-specific logger to DEBUG level +# Create WebSocket communication logger +ws_logger = logging.getLogger("websocket_comm") +ws_logger.setLevel(logging.INFO) +ws_handler = logging.FileHandler("websocket_comm.log", encoding='utf-8') +ws_formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s") +ws_handler.setFormatter(ws_formatter) +ws_logger.addHandler(ws_handler) +ws_logger.propagate = False # Don't propagate to root logger + # Ensure all other libraries (including root) use at least INFO level logging.getLogger().setLevel(logging.INFO) logger.info("Starting detector worker application") logger.info(f"Configuration: Target FPS: {TARGET_FPS}, Max streams: {max_streams}, Max retries: {max_retries}") +ws_logger.info("WebSocket communication logging started - TX/RX format") +logger.info("WebSocket communication will be logged to websocket_comm.log") # Ensure the models directory exists os.makedirs("models", exist_ok=True) @@ -81,6 +107,335 @@ logger.info("Ensured models directory exists") # Constants for heartbeat and timeouts HEARTBEAT_INTERVAL = 2 # seconds + +# Global Redis connection for LPR integration +redis_client_global = None +lpr_listener_thread = None +cleanup_timer_thread = None +lpr_integration_started = False + +# Redis connection helper functions +def get_redis_config_from_model(camera_id: str) -> Dict[str, Any]: + """Extract Redis configuration from loaded model pipeline""" + try: + for model_id, model_tree in models.get(camera_id, {}).items(): + if hasattr(model_tree, 
'get') and 'redis_client' in model_tree: + # Extract config from existing Redis client + client = model_tree['redis_client'] + if client: + return { + 'host': client.connection_pool.connection_kwargs['host'], + 'port': client.connection_pool.connection_kwargs['port'], + 'password': client.connection_pool.connection_kwargs.get('password'), + 'db': client.connection_pool.connection_kwargs.get('db', 0) + } + except Exception as e: + logger.debug(f"Could not extract Redis config from model: {e}") + + # Fallback - try to read from pipeline.json directly + try: + pipeline_dirs = [] + models_dir = "models" + if os.path.exists(models_dir): + for root, dirs, files in os.walk(models_dir): + if "pipeline.json" in files: + with open(os.path.join(root, "pipeline.json"), 'r') as f: + config = json.load(f) + if 'redis' in config: + return config['redis'] + except Exception as e: + logger.debug(f"Could not read Redis config from pipeline.json: {e}") + + return None + +def create_redis_connection() -> redis.Redis: + """Create Redis connection using config from pipeline""" + global redis_client_global + + if redis_client_global is not None: + try: + redis_client_global.ping() + return redis_client_global + except: + redis_client_global = None + + # Find any camera with a loaded model to get Redis config + redis_config = None + for camera_id in models.keys(): + redis_config = get_redis_config_from_model(camera_id) + if redis_config: + break + + if not redis_config: + logger.error("No Redis configuration found in any loaded models") + return None + + try: + redis_client_global = redis.Redis( + host=redis_config['host'], + port=redis_config['port'], + password=redis_config.get('password'), + db=redis_config.get('db', 0), + decode_responses=True, + socket_connect_timeout=5, + socket_timeout=5 + ) + redis_client_global.ping() + logger.info(f"βœ… Connected to Redis for LPR at {redis_config['host']}:{redis_config['port']}") + return redis_client_global + except Exception as e: + logger.error(f"❌ Failed to connect to Redis for LPR: {e}") + redis_client_global = None + return None + +# LPR Integration Functions +def process_license_result(lpr_data: Dict[str, Any]): + """Process incoming LPR result and update backend""" + try: + # Enhanced debugging for LPR data reception + logger.info("=" * 60) + logger.info("πŸš— LPR SERVICE DATA RECEIVED") + logger.info("=" * 60) + logger.info(f"πŸ“₯ Raw LPR data: {json.dumps(lpr_data, indent=2)}") + + session_id = str(lpr_data.get('session_id', '')) + license_text = lpr_data.get('license_character', '') + + logger.info(f"πŸ” Extracted session_id: '{session_id}'") + logger.info(f"πŸ” Extracted license_character: '{license_text}'") + logger.info(f"πŸ“Š Current cached sessions count: {len(session_detections)}") + logger.info(f"πŸ“Š Available session IDs: {list(session_detections.keys())}") + + # Find cached detection by session_id + if session_id not in session_detections: + logger.warning("❌ LPR SESSION ID NOT FOUND!") + logger.warning(f" Looking for session_id: '{session_id}'") + logger.warning(f" Available sessions: {list(session_detections.keys())}") + logger.warning(f" Session count: {len(session_detections)}") + + # Additional debugging - show session timestamps + if session_detections: + logger.warning("πŸ“… Available session details:") + for sid, timestamp in detection_timestamps.items(): + age = time.time() - timestamp + camera = session_to_camera.get(sid, 'unknown') + logger.warning(f" Session {sid}: camera={camera}, age={age:.1f}s") + else: + logger.warning(" No cached 
sessions available - worker may not have processed any detections yet") + + logger.warning("πŸ’‘ Possible causes:") + logger.warning(" 1. Session expired (TTL: 10 minutes)") + logger.warning(" 2. Session ID mismatch between detection and LPR service") + logger.warning(" 3. Detection was not cached (no sessionId from backend)") + logger.warning(" 4. Worker restarted after detection but before LPR result") + return + + # Get the original detection data + detection_data = session_detections[session_id].copy() + camera_id = session_to_camera.get(session_id, 'unknown') + + logger.info("βœ… LPR SESSION FOUND!") + logger.info(f" πŸ“Ή Camera ID: {camera_id}") + logger.info(f" ⏰ Session age: {time.time() - detection_timestamps.get(session_id, 0):.1f} seconds") + + # Show original detection structure before update + original_license = detection_data.get('data', {}).get('detection', {}).get('licensePlateText') + logger.info(f" πŸ” Original licensePlateText: {original_license}") + logger.info(f" πŸ†• New licensePlateText: '{license_text}'") + + # Update licensePlateText in detection + if 'data' in detection_data and 'detection' in detection_data['data']: + detection_data['data']['detection']['licensePlateText'] = license_text + + logger.info("🎯 LICENSE PLATE UPDATE SUCCESS!") + logger.info(f" βœ… Updated detection for session {session_id}") + logger.info(f" βœ… Set licensePlateText = '{license_text}'") + + # Show full detection structure after update + detection_dict = detection_data['data']['detection'] + logger.info("πŸ“‹ Updated detection dictionary:") + logger.info(f" carModel: {detection_dict.get('carModel')}") + logger.info(f" carBrand: {detection_dict.get('carBrand')}") + logger.info(f" bodyType: {detection_dict.get('bodyType')}") + logger.info(f" licensePlateText: {detection_dict.get('licensePlateText')} ← UPDATED") + logger.info(f" licensePlateConfidence: {detection_dict.get('licensePlateConfidence')}") + else: + logger.error("❌ INVALID DETECTION DATA STRUCTURE!") + logger.error(f" Session {session_id} has malformed detection data") + logger.error(f" Detection data keys: {list(detection_data.keys())}") + if 'data' in detection_data: + logger.error(f" Data keys: {list(detection_data['data'].keys())}") + return + + # Update timestamp to indicate this is an LPR update + detection_data['timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + + # Update all caches with new data + session_detections[session_id] = detection_data.copy() + cached_detections[camera_id] = detection_data.copy() + + # CRITICAL: Also update the pipeline state cached detection dict (used by lightweight mode) + if camera_id in session_pipeline_states: + pipeline_state = session_pipeline_states[camera_id] + current_cached_dict = pipeline_state.get("cached_detection_dict", {}) + + # Update the pipeline cached detection dict with new license plate + updated_dict = current_cached_dict.copy() if current_cached_dict else {} + updated_dict['licensePlateText'] = license_text + + pipeline_state["cached_detection_dict"] = updated_dict + logger.info(f"βœ… LPR: Updated pipeline state cached_detection_dict for camera {camera_id}") + logger.debug(f"πŸ” Pipeline cached dict now: {updated_dict}") + else: + logger.warning(f"⚠️ Camera {camera_id} not found in session_pipeline_states - pipeline cache not updated") + + logger.info("πŸ“‘ SENDING UPDATED DETECTION TO BACKEND") + logger.info(f" πŸ“Ή Camera ID: {camera_id}") + logger.info(f" πŸ“¨ Updated licensePlateText: '{license_text}'") + logger.info(" πŸ”„ Updated both cache 
systems:") + logger.info(f" 1️⃣ cached_detections[{camera_id}] βœ…") + logger.info(f" 2️⃣ session_pipeline_states[{camera_id}].cached_detection_dict βœ…") + + # Log the full message being sent + logger.info("πŸ“‹ Updated detection data in cache:") + logger.info(json.dumps(detection_data, indent=2)) + + logger.info("βœ… ALL CACHES UPDATED!") + logger.info(f" 🎯 Lightweight mode will now use updated licensePlateText") + logger.info(f" πŸ“€ Backend will receive: licensePlateText = '{license_text}'") + logger.info(" πŸ”„ Both cache systems synchronized with LPR data") + + logger.info("=" * 60) + logger.info("🏁 LPR PROCESSING COMPLETE") + logger.info(f" Session: {session_id}") + logger.info(f" License: '{license_text}'") + logger.info(f" Status: βœ… SUCCESS - DETECTION CACHE UPDATED") + logger.info("=" * 60) + + except Exception as e: + logger.error("=" * 60) + logger.error("❌ LPR PROCESSING FAILED") + logger.error("=" * 60) + logger.error(f"Error: {e}") + import traceback + logger.error(f"Traceback: {traceback.format_exc()}") + logger.error("=" * 60) + +# LPR integration now uses cached detection mechanism instead of direct WebSocket sending + +def license_results_listener(): + """Background thread to listen for LPR results from Redis""" + logger.info("🎧 Starting LPR listener thread...") + + while True: + try: + redis_client = create_redis_connection() + if not redis_client: + logger.error("❌ No Redis connection available for LPR listener") + time.sleep(10) + continue + + pubsub = redis_client.pubsub() + pubsub.subscribe("license_results") + logger.info("βœ… LPR listener subscribed to 'license_results' channel") + + for message in pubsub.listen(): + try: + if message['type'] == 'message': + logger.info("πŸ”” REDIS MESSAGE RECEIVED!") + logger.info(f" πŸ“‘ Channel: {message['channel']}") + logger.info(f" πŸ“₯ Raw data: {message['data']}") + logger.info(f" πŸ“ Data size: {len(str(message['data']))} bytes") + + try: + lpr_data = json.loads(message['data']) + logger.info("βœ… JSON parsing successful") + logger.info("🏁 Starting LPR processing...") + process_license_result(lpr_data) + logger.info("βœ… LPR processing completed") + except json.JSONDecodeError as e: + logger.error("❌ JSON PARSING FAILED!") + logger.error(f" Error: {e}") + logger.error(f" Raw data: {message['data']}") + logger.error(f" Data type: {type(message['data'])}") + except Exception as e: + logger.error("❌ LPR PROCESSING ERROR!") + logger.error(f" Error: {e}") + import traceback + logger.error(f" Traceback: {traceback.format_exc()}") + elif message['type'] == 'subscribe': + logger.info(f"πŸ“‘ LPR listener subscribed to channel: {message['channel']}") + logger.info("🎧 Ready to receive license plate results...") + elif message['type'] == 'unsubscribe': + logger.warning(f"πŸ“‘ LPR listener unsubscribed from channel: {message['channel']}") + else: + logger.debug(f"πŸ“‘ Redis message type: {message['type']}") + + except Exception as e: + logger.error(f"❌ Error in LPR message processing loop: {e}") + break + + except redis.exceptions.ConnectionError as e: + logger.error(f"❌ Redis connection lost in LPR listener: {e}") + time.sleep(5) # Wait before reconnecting + except Exception as e: + logger.error(f"❌ Unexpected error in LPR listener: {e}") + time.sleep(10) + + logger.warning("πŸ›‘ LPR listener thread stopped") + +def cleanup_expired_sessions(): + """Remove sessions older than TTL (10 minutes)""" + try: + current_time = time.time() + ttl_seconds = 600 # 10 minutes + + expired_sessions = [ + session_id for session_id, timestamp in 
detection_timestamps.items() + if current_time - timestamp > ttl_seconds + ] + + if expired_sessions: + logger.info(f"🧹 Cleaning up {len(expired_sessions)} expired sessions") + + for session_id in expired_sessions: + session_detections.pop(session_id, None) + camera_id = session_to_camera.pop(session_id, None) + detection_timestamps.pop(session_id, None) + logger.debug(f"Cleaned up expired session: {session_id} (camera: {camera_id})") + + else: + logger.debug(f"🧹 No expired sessions to clean up ({len(detection_timestamps)} active)") + + except Exception as e: + logger.error(f"❌ Error in session cleanup: {e}") + +def cleanup_timer(): + """Background thread for periodic session cleanup""" + logger.info("⏰ Starting session cleanup timer thread...") + + while True: + try: + time.sleep(120) # Run cleanup every 2 minutes + cleanup_expired_sessions() + except Exception as e: + logger.error(f"❌ Error in cleanup timer: {e}") + time.sleep(120) + +def start_lpr_integration(): + """Start LPR integration threads""" + global lpr_listener_thread, cleanup_timer_thread + + # Start LPR listener thread + lpr_listener_thread = threading.Thread(target=license_results_listener, daemon=True, name="LPR-Listener") + lpr_listener_thread.start() + logger.info("βœ… LPR listener thread started") + + # Start cleanup timer thread + cleanup_timer_thread = threading.Thread(target=cleanup_timer, daemon=True, name="Session-Cleanup") + cleanup_timer_thread.start() + logger.info("βœ… Session cleanup timer thread started") + WORKER_TIMEOUT_MS = 10000 logger.debug(f"Heartbeat interval set to {HEARTBEAT_INTERVAL} seconds") @@ -118,15 +473,34 @@ def download_mpta(url: str, dest_path: str) -> str: def fetch_snapshot(url: str): try: from requests.auth import HTTPBasicAuth, HTTPDigestAuth + import requests.adapters + import urllib3 # Parse URL to extract credentials parsed = urlparse(url) - # Prepare headers - some cameras require User-Agent + # Prepare headers - some cameras require User-Agent and specific headers headers = { - 'User-Agent': 'Mozilla/5.0 (compatible; DetectorWorker/1.0)' + 'User-Agent': 'Mozilla/5.0 (compatible; DetectorWorker/1.0)', + 'Accept': 'image/jpeg,image/*,*/*', + 'Connection': 'close', + 'Cache-Control': 'no-cache' } + # Create a session with custom adapter for better connection handling + session = requests.Session() + adapter = requests.adapters.HTTPAdapter( + pool_connections=1, + pool_maxsize=1, + max_retries=urllib3.util.retry.Retry( + total=2, + backoff_factor=0.1, + status_forcelist=[500, 502, 503, 504] + ) + ) + session.mount('http://', adapter) + session.mount('https://', adapter) + # Reconstruct URL without credentials clean_url = f"{parsed.scheme}://{parsed.hostname}" if parsed.port: @@ -136,44 +510,68 @@ def fetch_snapshot(url: str): clean_url += f"?{parsed.query}" auth = None + response = None + if parsed.username and parsed.password: # Try HTTP Digest authentication first (common for IP cameras) try: auth = HTTPDigestAuth(parsed.username, parsed.password) - response = requests.get(clean_url, auth=auth, headers=headers, timeout=10) + response = session.get(clean_url, auth=auth, headers=headers, timeout=(5, 15), stream=True) if response.status_code == 200: logger.debug(f"Successfully authenticated using HTTP Digest for {clean_url}") elif response.status_code == 401: # If Digest fails, try Basic auth logger.debug(f"HTTP Digest failed, trying Basic auth for {clean_url}") auth = HTTPBasicAuth(parsed.username, parsed.password) - response = requests.get(clean_url, auth=auth, headers=headers, 
timeout=10) + response = session.get(clean_url, auth=auth, headers=headers, timeout=(5, 15), stream=True) if response.status_code == 200: logger.debug(f"Successfully authenticated using HTTP Basic for {clean_url}") except Exception as auth_error: logger.debug(f"Authentication setup error: {auth_error}") # Fallback to original URL with embedded credentials - response = requests.get(url, headers=headers, timeout=10) + response = session.get(url, headers=headers, timeout=(5, 15), stream=True) else: # No credentials in URL, make request as-is - response = requests.get(url, headers=headers, timeout=10) + response = session.get(url, headers=headers, timeout=(5, 15), stream=True) - if response.status_code == 200: + if response and response.status_code == 200: + # Read content with size limit to prevent memory issues + content = b'' + max_size = 10 * 1024 * 1024 # 10MB limit + for chunk in response.iter_content(chunk_size=8192): + content += chunk + if len(content) > max_size: + logger.error(f"Snapshot too large (>{max_size} bytes) from {clean_url}") + return None + # Convert response content to numpy array - nparr = np.frombuffer(response.content, np.uint8) + nparr = np.frombuffer(content, np.uint8) # Decode image frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR) if frame is not None: - logger.debug(f"Successfully fetched snapshot from {clean_url}, shape: {frame.shape}") + logger.debug(f"Successfully fetched snapshot from {clean_url}, shape: {frame.shape}, size: {len(content)} bytes") return frame else: - logger.error(f"Failed to decode image from snapshot URL: {clean_url}") + logger.error(f"Failed to decode image from snapshot URL: {clean_url} (content size: {len(content)} bytes)") return None - else: + elif response: logger.error(f"Failed to fetch snapshot (status code {response.status_code}): {clean_url}") + # Log response headers and first part of content for debugging + logger.debug(f"Response headers: {dict(response.headers)}") + if len(response.content) < 1000: + logger.debug(f"Response content: {response.content[:500]}") return None + else: + logger.error(f"No response received from snapshot URL: {clean_url}") + return None + except requests.exceptions.Timeout as e: + logger.error(f"Timeout fetching snapshot from {url}: {str(e)}") + return None + except requests.exceptions.ConnectionError as e: + logger.error(f"Connection error fetching snapshot from {url}: {str(e)}") + return None except Exception as e: - logger.error(f"Exception fetching snapshot from {url}: {str(e)}") + logger.error(f"Exception fetching snapshot from {url}: {str(e)}", exc_info=True) return None # Helper to get crop coordinates from stream @@ -185,9 +583,137 @@ def get_crop_coords(stream): "cropY2": stream.get("cropY2") } +# Camera state management functions +def set_camera_connected(camera_id, connected=True, error_msg=None): + """Set camera connection state and track error information""" + current_time = time.time() + + if camera_id not in camera_states: + camera_states[camera_id] = { + "connected": True, + "last_error": None, + "last_error_time": None, + "consecutive_failures": 0, + "disconnection_notified": False + } + + state = camera_states[camera_id] + was_connected = state["connected"] + + if connected: + state["connected"] = True + state["consecutive_failures"] = 0 + state["disconnection_notified"] = False + if not was_connected: + logger.info(f"πŸ“Ά CAMERA RECONNECTED: {camera_id}") + else: + state["connected"] = False + state["last_error"] = error_msg + state["last_error_time"] = current_time + 
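+        # Debounce note: a single failed read only records the error here; the
+        # camera is treated as permanently disconnected (and a detection: null
+        # reset is sent upstream) only after 3 consecutive failures, per the
+        # is_permanent check below.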
state["consecutive_failures"] += 1 + + # Distinguish between temporary and permanent disconnection + is_permanent = state["consecutive_failures"] >= 3 + + if was_connected and is_permanent: + logger.error(f"πŸ“΅ CAMERA DISCONNECTED: {camera_id} - {error_msg} (consecutive failures: {state['consecutive_failures']})") + logger.info(f"🚨 CAMERA ERROR DETECTED - Will send detection: null to reset backend session for {camera_id}") + +def is_camera_connected(camera_id): + """Check if camera is currently connected""" + return camera_states.get(camera_id, {}).get("connected", True) + +def should_notify_disconnection(camera_id): + """Check if we should notify backend about disconnection""" + state = camera_states.get(camera_id, {}) + is_disconnected = not state.get("connected", True) + not_yet_notified = not state.get("disconnection_notified", False) + has_enough_failures = state.get("consecutive_failures", 0) >= 3 + + return is_disconnected and not_yet_notified and has_enough_failures + +def mark_disconnection_notified(camera_id): + """Mark that we've notified backend about this disconnection""" + if camera_id in camera_states: + camera_states[camera_id]["disconnection_notified"] = True + logger.debug(f"Marked disconnection notification sent for camera {camera_id}") + +def get_or_init_session_pipeline_state(camera_id): + """Get or initialize session pipeline state for a camera""" + if camera_id not in session_pipeline_states: + session_pipeline_states[camera_id] = { + "mode": "validation_detecting", # "validation_detecting", "send_detections", "waiting_for_session_id", "full_pipeline", "lightweight", "car_gone_waiting" + "session_id_received": False, + "full_pipeline_completed": False, + "absence_counter": 0, + "validation_counter": 0, # Counter for validation phase + "validation_threshold": 4, # Default validation threshold + "max_absence_frames": 3, + "yolo_inference_enabled": True, # Controls whether to run YOLO inference + "cached_detection_dict": None, # Cached detection dict for lightweight mode + "stable_track_id": None, # The stable track ID we're monitoring + "validated_detection": None, # Stored detection result from validation phase for full_pipeline reuse + "progression_stage": None # Tracks current progression stage (welcome, car_wait_staff, car_fueling, car_waitpayment) + } + return session_pipeline_states[camera_id] + +def update_session_pipeline_mode(camera_id, new_mode, session_id=None): + """Update session pipeline mode and related state""" + state = get_or_init_session_pipeline_state(camera_id) + old_mode = state["mode"] + state["mode"] = new_mode + + # Reset counters based on mode transition + if new_mode == "validation_detecting": + # Transitioning to validation mode - reset both counters for fresh start + old_validation_counter = state.get("validation_counter", 0) + old_absence_counter = state.get("absence_counter", 0) + state["validation_counter"] = 0 + state["absence_counter"] = 0 + if old_validation_counter > 0 or old_absence_counter > 0: + logger.info(f"🧹 Camera {camera_id}: VALIDATION MODE RESET - validation_counter: {old_validation_counter}β†’0, absence_counter: {old_absence_counter}β†’0") + + if session_id: + state["session_id_received"] = True + state["absence_counter"] = 0 # Reset absence counter when session starts + + logger.info(f"πŸ“Š Camera {camera_id}: Pipeline mode changed from '{old_mode}' to '{new_mode}'") + return state + #################################################### # REST API endpoint for image retrieval 
#################################################### +@app.get("/lpr/debug") +async def get_lpr_debug_info(): + """Debug endpoint to inspect LPR integration state""" + try: + return { + "status": "success", + "lpr_integration_started": lpr_integration_started, + "redis_connected": redis_client_global is not None and redis_client_global.ping() if redis_client_global else False, + "active_sessions": len(session_detections), + "session_details": { + session_id: { + "camera_id": session_to_camera.get(session_id, "unknown"), + "timestamp": detection_timestamps.get(session_id, 0), + "age_seconds": time.time() - detection_timestamps.get(session_id, time.time()), + "has_license": session_detections[session_id].get('data', {}).get('detection', {}).get('licensePlateText') is not None + } + for session_id in session_detections.keys() + }, + "thread_status": { + "lpr_listener_alive": lpr_listener_thread.is_alive() if lpr_listener_thread else False, + "cleanup_timer_alive": cleanup_timer_thread.is_alive() if cleanup_timer_thread else False + }, + "cached_detections_by_camera": list(cached_detections.keys()) + } + except Exception as e: + return { + "status": "error", + "error": str(e), + "lpr_integration_started": lpr_integration_started + } + @app.get("/camera/{camera_id}/image") async def get_camera_image(camera_id: str): """ @@ -236,6 +762,47 @@ async def detect(websocket: WebSocket): async def handle_detection(camera_id, stream, frame, websocket, model_tree, persistent_data): try: + # Check camera connection state first - handle disconnection immediately + if should_notify_disconnection(camera_id): + logger.error(f"🚨 CAMERA DISCONNECTION DETECTED: {camera_id} - sending immediate detection: null") + + # Clear cached detections and occupancy state + cached_detections.pop(camera_id, None) + frame_skip_flags.pop(camera_id, None) + cached_full_pipeline_results.pop(camera_id, None) # Clear cached pipeline results + session_pipeline_states.pop(camera_id, None) # Reset session pipeline state + + # Reset pipeline state immediately + from siwatsystem.pympta import reset_tracking_state + model_id = stream.get("modelId", "unknown") + reset_tracking_state(camera_id, model_id, "camera disconnected") + + # Send immediate detection: null to backend + detection_data = { + "type": "imageDetection", + "subscriptionIdentifier": stream["subscriptionIdentifier"], + "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "data": { + "detection": None, # null detection for disconnection + "modelId": stream["modelId"], + "modelName": stream["modelName"] + } + } + + try: + ws_logger.info(f"TX -> {json.dumps(detection_data, separators=(',', ':'))}") + await websocket.send_json(detection_data) + except RuntimeError as e: + if "websocket.close" in str(e): + logger.warning(f"WebSocket connection closed - cannot send disconnection signal for camera {camera_id}") + return persistent_data + else: + raise + mark_disconnection_notified(camera_id) + logger.info(f"πŸ“‘ SENT DISCONNECTION SIGNAL - detection: null for camera {camera_id}, backend should clear session") + + return persistent_data + # Apply crop if specified cropped_frame = frame if all(coord is not None for coord in [stream.get("cropX1"), stream.get("cropY1"), stream.get("cropX2"), stream.get("cropY2")]): @@ -250,81 +817,523 @@ async def detect(websocket: WebSocket): subscription_parts = stream["subscriptionIdentifier"].split(';') display_identifier = subscription_parts[0] if subscription_parts else None - # Create context for pipeline execution (session_id 
will be generated by pipeline) + # Get backend session ID if available + backend_session_id = session_ids.get(display_identifier) + + # Get or initialize session pipeline state + pipeline_state = get_or_init_session_pipeline_state(camera_id) + current_mode = pipeline_state["mode"] + + logger.debug(f"πŸ” SESSIONID LOOKUP: display='{display_identifier}', session_id={repr(backend_session_id)}, mode='{current_mode}'") + logger.debug(f"πŸ” Available session_ids: {session_ids}") + logger.debug(f"πŸ” VALIDATED_DETECTION TRACE: {pipeline_state.get('validated_detection')}") + + # ═══ SESSION ID-BASED PROCESSING MODE ═══ + if not backend_session_id: + # No session ID - handle different modes appropriately + if current_mode == "lightweight": + # Check if we're in car_waitpayment stage - if so, don't reset immediately + current_progression = pipeline_state.get("progression_stage") + if current_progression == "car_waitpayment": + # Stay in lightweight mode - let absence counter + sessionId null logic handle reset + logger.debug(f"πŸ” Camera {camera_id}: No session ID but in car_waitpayment - staying in lightweight mode for dual reset condition") + else: + # Not in car_waitpayment - reset immediately (situation 1) + update_session_pipeline_mode(camera_id, "validation_detecting") + current_mode = "validation_detecting" + logger.debug(f"πŸ” Camera {camera_id}: No session ID - reset to validation_detecting (not in car_waitpayment)") + elif current_mode not in ["validation_detecting", "send_detections", "waiting_for_session_id"]: + # Other modes - reset to validation_detecting + update_session_pipeline_mode(camera_id, "validation_detecting") + current_mode = "validation_detecting" + logger.debug(f"πŸ” Camera {camera_id}: No session ID - reset to validation_detecting from {current_mode}") + else: + logger.debug(f"πŸ” Camera {camera_id}: No session ID - staying in {current_mode} mode") + else: + # Session ID available - switch to full pipeline mode + if current_mode in ["send_detections", "waiting_for_session_id"]: + # Session ID just arrived - switch to full pipeline mode + update_session_pipeline_mode(camera_id, "full_pipeline", backend_session_id) + current_mode = "full_pipeline" + logger.info(f"πŸ”₯ Camera {camera_id}: Session ID received ({backend_session_id}) - switching to FULL PIPELINE mode") + + # Create context for pipeline execution pipeline_context = { "camera_id": camera_id, - "display_id": display_identifier + "display_id": display_identifier, + "backend_session_id": backend_session_id, + "current_mode": current_mode # Pass current mode to pipeline } - detection_result = run_pipeline(cropped_frame, model_tree, context=pipeline_context) - process_time = (time.time() - start_time) * 1000 - logger.debug(f"Detection for camera {camera_id} completed in {process_time:.2f}ms") + start_time = time.time() + detection_result = None + if current_mode == "validation_detecting": + # ═══ TRACK VALIDATION MODE ═══ + # Run tracking-based validation with track ID stability + logger.debug(f"πŸ” Camera {camera_id}: In validation_detecting mode - running track-based validation") + + # Get tracking configuration from model_tree + tracking_config = model_tree.get("tracking", {}) + tracking_enabled = tracking_config.get("enabled", True) + stability_threshold = tracking_config.get("stabilityThreshold", 4) + + # Default to "none" - only proceed after track validation + detection_result = {"class": "none", "confidence": 1.0, "bbox": [0, 0, 0, 0]} + + if tracking_enabled: + # Run full tracking detection to get 
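+                    # Return shape as consumed below (regions_dict is not used in
+                    # this mode, so its exact contents are an assumption here):
+                    #   all_detections: list of dicts with "class", "confidence",
+                    #       "bbox" and track "id"
+                    #   track_validation_result: {"validation_complete": bool,
+                    #       "stable_tracks": [track ids], "current_tracks": [track ids]}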
track IDs + from siwatsystem.pympta import run_detection_with_tracking + all_detections, regions_dict, track_validation_result = run_detection_with_tracking(cropped_frame, model_tree, pipeline_context) + + if track_validation_result.get("validation_complete", False): + # Track validation completed - we have stable track IDs + stable_tracks = track_validation_result.get("stable_tracks", []) + logger.info(f"🎯 Camera {camera_id}: TRACK VALIDATION COMPLETED - stable tracks: {stable_tracks}") + + # Switch to send_detections mode + update_session_pipeline_mode(camera_id, "send_detections") + + # Send the best detection with stable track + if all_detections: + # Find detection with stable track ID + stable_detection = None + for detection in all_detections: + if detection.get("id") in stable_tracks: + stable_detection = detection + break + + if stable_detection: + detection_result = { + "class": stable_detection.get("class", "car"), + "confidence": stable_detection.get("confidence", 0.0), + "bbox": stable_detection.get("bbox", [0, 0, 0, 0]), + "track_id": stable_detection.get("id") + } + + # Store validated detection for full_pipeline mode to reuse + pipeline_state["validated_detection"] = detection_result.copy() + logger.debug(f"πŸ” Camera {camera_id}: VALIDATION DEBUG - storing detection_result = {detection_result}") + logger.debug(f"πŸ” Camera {camera_id}: VALIDATION DEBUG - pipeline_state after storing = {pipeline_state.get('validated_detection')}") + logger.info(f"πŸš— Camera {camera_id}: SENDING STABLE DETECTION - track ID {detection_result['track_id']}") + logger.info(f"πŸ’Ύ Camera {camera_id}: STORED VALIDATED DETECTION for full_pipeline reuse") + else: + logger.warning(f"⚠️ Camera {camera_id}: Stable tracks found but no matching detection") + else: + # Track validation still in progress + stable_tracks = track_validation_result.get("stable_tracks", []) + current_tracks = track_validation_result.get("current_tracks", []) + + if current_tracks: + track_id = current_tracks[0] if current_tracks else "None" + stable_status = "STABLE" if stable_tracks else "validating" + logger.info(f"πŸ” Camera {camera_id}: TRACK VALIDATION - car track_id {track_id} ({stable_status}, need {stability_threshold} consecutive frames)") + else: + logger.debug(f"πŸ‘» Camera {camera_id}: No car detected") + + logger.debug(f"πŸ“€ Camera {camera_id}: Sending 'none' (track validation in progress)") + else: + # Tracking disabled - fall back to basic detection validation + logger.debug(f"πŸ” Camera {camera_id}: Tracking disabled - using basic detection validation") + from siwatsystem.pympta import run_lightweight_detection + basic_detection = run_lightweight_detection(cropped_frame, model_tree) + + if basic_detection and basic_detection.get("car_detected"): + best_detection = basic_detection.get("best_detection") + + # Increment validation counter for basic detection + pipeline_state["validation_counter"] += 1 + current_count = pipeline_state["validation_counter"] + threshold = pipeline_state["validation_threshold"] + + if current_count >= threshold: + update_session_pipeline_mode(camera_id, "send_detections") + detection_result = { + "class": best_detection.get("class", "car"), + "confidence": best_detection.get("confidence", 0.0), + "bbox": best_detection.get("bbox", [0, 0, 0, 0]) + } + + # Store validated detection for full_pipeline mode to reuse + pipeline_state["validated_detection"] = detection_result.copy() + logger.debug(f"πŸ” Camera {camera_id}: BASIC VALIDATION DEBUG - storing detection_result = 
{detection_result}") + logger.info(f"πŸ’Ύ Camera {camera_id}: STORED BASIC VALIDATED DETECTION for full_pipeline reuse") + logger.info(f"🎯 Camera {camera_id}: BASIC VALIDATION COMPLETED after {current_count} frames") + else: + logger.info(f"πŸ“Š Camera {camera_id}: Basic validation progress {current_count}/{threshold}") + else: + # Reset validation counter + if pipeline_state["validation_counter"] > 0: + pipeline_state["validation_counter"] = 0 + logger.info(f"πŸ”„ Camera {camera_id}: Reset validation counter (no detection)") + + elif current_mode == "send_detections": + # ═══ SEND DETECTIONS MODE ═══ + # Validation completed - now send detection_dict for car detections, detection: null for no car + logger.debug(f"πŸ“€ Camera {camera_id}: In send_detections mode - sending detection_dict for cars") + from siwatsystem.pympta import run_lightweight_detection + basic_detection = run_lightweight_detection(cropped_frame, model_tree) + + if basic_detection and basic_detection.get("car_detected"): + # Car detected - send detection_dict + best_detection = basic_detection.get("best_detection") + detection_result = { + "class": best_detection.get("class", "car"), + "confidence": best_detection.get("confidence", 0.0), + "bbox": best_detection.get("bbox", [0, 0, 0, 0]) + } + logger.info(f"πŸš— Camera {camera_id}: SENDING DETECTION_DICT - {detection_result['class']} (conf={detection_result['confidence']:.3f}) - backend should generate session ID") + else: + # No car detected - send "none" + detection_result = {"class": "none", "confidence": 1.0, "bbox": [0, 0, 0, 0]} + logger.debug(f"πŸ‘» Camera {camera_id}: No car detected - sending 'none'") + + elif current_mode == "waiting_for_session_id": + # ═══ WAITING FOR SESSION ID MODE ═══ + # Stop processing snapshots, wait for session ID + logger.debug(f"⏳ Camera {camera_id}: In waiting_for_session_id mode - not processing snapshots") + return persistent_data # Don't process or send anything + + elif current_mode == "full_pipeline": + # ═══ FULL PIPELINE MODE ═══ + logger.info(f"πŸ”₯ Camera {camera_id}: Running FULL PIPELINE (classification branches + Redis + PostgreSQL)") + + # Use validated detection from validation phase instead of detecting again + validated_detection = pipeline_state.get("validated_detection") + logger.debug(f"πŸ” Camera {camera_id}: FULL_PIPELINE DEBUG - validated_detection = {validated_detection}") + logger.debug(f"πŸ” Camera {camera_id}: FULL_PIPELINE DEBUG - pipeline_state keys = {list(pipeline_state.keys())}") + if validated_detection: + logger.info(f"πŸ”„ Camera {camera_id}: Using validated detection for full pipeline: track_id={validated_detection.get('track_id')}") + detection_result = run_pipeline(cropped_frame, model_tree, context=pipeline_context, validated_detection=validated_detection) + # Clear the validated detection after using it + pipeline_state["validated_detection"] = None + else: + logger.warning(f"⚠️ Camera {camera_id}: No validated detection found for full pipeline - this shouldn't happen") + detection_result = run_pipeline(cropped_frame, model_tree, context=pipeline_context) + + if detection_result and isinstance(detection_result, dict): + # Cache the full pipeline result + cached_full_pipeline_results[camera_id] = { + "result": detection_result.copy(), + "timestamp": time.time() + } + + # Note: Will cache detection_dict after branch processing completes + + # Store the stable track ID for lightweight monitoring + track_id = detection_result.get("track_id") or detection_result.get("id") + if track_id is not 
None: + pipeline_state["stable_track_id"] = track_id + logger.info(f"πŸ’Ύ Camera {camera_id}: Cached stable track_id={track_id}") + else: + logger.warning(f"⚠️ Camera {camera_id}: No track_id found in detection_result: {detection_result.keys()}") + + # Ensure we have a cached detection dict for lightweight mode + if not pipeline_state.get("cached_detection_dict"): + # Create fallback cached detection dict if branch processing didn't populate it + fallback_detection = { + "carModel": None, + "carBrand": None, + "carYear": None, + "bodyType": None, + "licensePlateText": None, + "licensePlateConfidence": None + } + pipeline_state["cached_detection_dict"] = fallback_detection + logger.warning(f"⚠️ Camera {camera_id}: Created fallback cached detection dict (branch processing may have failed)") + + # Switch to lightweight mode + update_session_pipeline_mode(camera_id, "lightweight") + logger.info(f"βœ… Camera {camera_id}: Full pipeline completed - switching to LIGHTWEIGHT mode") + + elif current_mode == "lightweight": + # ═══ SIMPLIFIED LIGHTWEIGHT MODE ═══ + # Send cached detection dict + check for 2 consecutive empty frames to reset + + stable_track_id = pipeline_state.get("stable_track_id") + cached_detection_dict = pipeline_state.get("cached_detection_dict") + + logger.debug(f"πŸͺΆ Camera {camera_id}: LIGHTWEIGHT MODE - stable_track_id={stable_track_id}") + + if not pipeline_state.get("yolo_inference_enabled", True): + # YOLO inference disabled during car_fueling - continue sending cached detection dict + logger.debug(f"πŸ›‘ Camera {camera_id}: YOLO inference disabled during car_fueling - continue sending cached detection dict") + if cached_detection_dict: + detection_result = cached_detection_dict # Continue sending cached data + logger.info(f"β›½ Camera {camera_id}: YOLO disabled during car_fueling but sending cached detection dict") + else: + logger.warning(f"⚠️ Camera {camera_id}: YOLO disabled but no cached detection dict available") + detection_result = None + else: + # Run lightweight YOLO inference to check car presence for reset logic (no tracking validation needed) + from siwatsystem.pympta import run_lightweight_detection + basic_detection = run_lightweight_detection(cropped_frame, model_tree) + + any_car_detected = basic_detection and basic_detection.get("car_detected", False) + logger.debug(f"πŸ” Camera {camera_id}: LIGHTWEIGHT - simple car presence check: {any_car_detected}") + + if any_car_detected: + # Car detected - reset absence counter, continue sending cached detection dict + pipeline_state["absence_counter"] = 0 # Reset absence since cars are present + + if cached_detection_dict: + detection_result = cached_detection_dict # Always send cached data + logger.info(f"πŸ’Ύ Camera {camera_id}: LIGHTWEIGHT - car detected, sending cached detection dict") + else: + logger.warning(f"⚠️ Camera {camera_id}: LIGHTWEIGHT - car detected but no cached detection dict available") + detection_result = None + else: + # No car detected - increment absence counter + pipeline_state["absence_counter"] += 1 + absence_count = pipeline_state["absence_counter"] + max_absence = 3 # Need 3 consecutive empty frames + + logger.info(f"πŸ‘» Camera {camera_id}: LIGHTWEIGHT - no car detected (absence {absence_count}/{max_absence})") + + # Check if we should reset: Need BOTH 3 consecutive absence frames AND sessionId: null + current_progression = pipeline_state.get("progression_stage") + should_check_session_null = current_progression == "car_waitpayment" + + if absence_count >= max_absence: + if 
should_check_session_null: + # In car_waitpayment stage - require BOTH conditions + if backend_session_id is None: + # Both conditions met: 3 absence frames + sessionId: null + logger.info(f"πŸ”„ Camera {camera_id}: DUAL RESET CONDITIONS MET - {max_absence} consecutive absence frames + sessionId: null") + + # Clear all state and prepare for next car + cached_full_pipeline_results.pop(camera_id, None) + pipeline_state["cached_detection_dict"] = None + pipeline_state["stable_track_id"] = None + pipeline_state["validated_detection"] = None + pipeline_state["progression_stage"] = None + old_absence_counter = pipeline_state["absence_counter"] + old_validation_counter = pipeline_state.get("validation_counter", 0) + pipeline_state["absence_counter"] = 0 + pipeline_state["validation_counter"] = 0 + pipeline_state["yolo_inference_enabled"] = True + + logger.info(f"🧹 Camera {camera_id}: DUAL RESET - absence_counter: {old_absence_counter}β†’0, validation_counter: {old_validation_counter}β†’0, progression_stage: {current_progression}β†’None") + + # Clear stability tracking data for this camera + from siwatsystem.pympta import reset_camera_stability_tracking + reset_camera_stability_tracking(camera_id, model_tree.get("modelId", "unknown")) + + # Switch back to validation phase + update_session_pipeline_mode(camera_id, "validation_detecting") + logger.info(f"βœ… Camera {camera_id}: DUAL RESET TO VALIDATION COMPLETE - ready for new car") + + # Now in validation mode - send what YOLO detection finds (will be null since no car) + detection_result = {"class": "none", "confidence": 1.0, "bbox": [0, 0, 0, 0]} + else: + # Only absence frames met, but sessionId is not null - continue sending cached detection + logger.info(f"⏳ Camera {camera_id}: {max_absence} absence frames reached but sessionId={backend_session_id} (not null) - continuing with cached detection") + if cached_detection_dict: + detection_result = cached_detection_dict + else: + logger.warning(f"⚠️ Camera {camera_id}: No cached detection dict available") + detection_result = None + else: + # Not in car_waitpayment - use original simple reset condition (situation 1) + logger.info(f"πŸ”„ Camera {camera_id}: SIMPLE RESET CONDITION MET - {max_absence} consecutive empty frames (not in car_waitpayment)") + + # Clear all state and prepare for next car + cached_full_pipeline_results.pop(camera_id, None) + pipeline_state["cached_detection_dict"] = None + pipeline_state["stable_track_id"] = None + pipeline_state["validated_detection"] = None + pipeline_state["progression_stage"] = None + old_absence_counter = pipeline_state["absence_counter"] + old_validation_counter = pipeline_state.get("validation_counter", 0) + pipeline_state["absence_counter"] = 0 + pipeline_state["validation_counter"] = 0 + pipeline_state["yolo_inference_enabled"] = True + + logger.info(f"🧹 Camera {camera_id}: SIMPLE RESET - absence_counter: {old_absence_counter}β†’0, validation_counter: {old_validation_counter}β†’0") + + # Clear stability tracking data for this camera + from siwatsystem.pympta import reset_camera_stability_tracking + reset_camera_stability_tracking(camera_id, model_tree.get("modelId", "unknown")) + + # Switch back to validation phase + update_session_pipeline_mode(camera_id, "validation_detecting") + logger.info(f"βœ… Camera {camera_id}: SIMPLE RESET TO VALIDATION COMPLETE - ready for new car") + + # Now in validation mode - send what YOLO detection finds (will be null since no car) + detection_result = {"class": "none", "confidence": 1.0, "bbox": [0, 0, 0, 0]} + 
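+                            # Reset decision summary (restates the branches above):
+                            #   absence >= 3, stage == car_waitpayment, sessionId is None -> dual reset
+                            #   absence >= 3, stage == car_waitpayment, sessionId set     -> keep sending cached dict
+                            #   absence >= 3, stage != car_waitpayment                    -> simple reset
+                            #   absence < 3                                               -> keep sending cached dict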
else: + # Still within absence threshold - continue sending cached detection dict + if cached_detection_dict: + detection_result = cached_detection_dict # Send cached data + logger.info(f"⏳ Camera {camera_id}: LIGHTWEIGHT - no car but absence<{max_absence}, still sending cached detection dict") + else: + logger.warning(f"⚠️ Camera {camera_id}: LIGHTWEIGHT - no cached detection dict available") + detection_result = None + + elif current_mode == "car_gone_waiting": + # ═══ CAR GONE WAITING STATE ═══ + # Car is gone (both conditions met), YOLO inference disabled, waiting for new session + + logger.debug(f"πŸ›‘ Camera {camera_id}: CAR GONE WAITING - YOLO inference stopped") + + # Check if backend has started a new session (indicates new car scenario) + if backend_session_id is not None: + # Backend started new session - re-enable YOLO and reset to validation + pipeline_state["yolo_inference_enabled"] = True + pipeline_state["absence_counter"] = 0 + pipeline_state["stable_track_id"] = None + pipeline_state["cached_detection_dict"] = None + pipeline_state["validated_detection"] = None + + # Clear stability tracking data for this camera + from siwatsystem.pympta import reset_camera_stability_tracking + reset_camera_stability_tracking(camera_id, model_tree.get("modelId", "unknown")) + + update_session_pipeline_mode(camera_id, "validation_detecting") + logger.info(f"πŸ”„ Camera {camera_id}: New session detected (id={backend_session_id}) - re-enabling YOLO inference") + logger.info(f"βœ… Camera {camera_id}: Reset to validation mode - cleared all tracking, ready for new car detection") + + # Don't run detection this frame - let next frame start fresh + detection_result = {"class": "none", "confidence": 1.0, "bbox": [0, 0, 0, 0]} + else: + # Still waiting - no sessionId, no detection to send + logger.debug(f"πŸ›‘ Camera {camera_id}: Car gone waiting - no YOLO inference, no data sent") + detection_result = None + + process_time = (time.time() - start_time) * 1000 + logger.debug(f"Detection for camera {camera_id} completed in {process_time:.2f}ms (mode: {current_mode})") + + # Skip processing if no detection result (blocked by session gating) + if detection_result is None: + logger.debug(f"No detection result to process for camera {camera_id}") + return persistent_data + # Log the raw detection result for debugging logger.debug(f"Raw detection result for camera {camera_id}:\n{json.dumps(detection_result, indent=2, default=str)}") - # Extract session_id from pipeline result (generated during database record creation) - session_id = None - if detection_result and isinstance(detection_result, dict): - # Check if pipeline generated a session_id (happens when Car+Frontal detected together) - if "session_id" in detection_result: - session_id = detection_result["session_id"] - logger.debug(f"Extracted session_id from pipeline result: {session_id}") + # Extract session_id from pipeline result (always use backend sessionId) + session_id = backend_session_id + logger.debug(f"Using backend session_id: {session_id}") - # Process detection result - run_pipeline returns the primary detection directly - if detection_result and isinstance(detection_result, dict) and "class" in detection_result: - highest_confidence_detection = detection_result + + # Process detection result based on current mode + if current_mode == "validation_detecting": + # ═══ VALIDATION DETECTING MODE ═══ + # Always send detection: null during validation phase + detection_dict = None + logger.debug(f"πŸ” SENDING 'NONE' - validation_detecting mode 
for camera {camera_id}") + + elif current_mode == "send_detections": + # ═══ SEND DETECTIONS MODE ═══ + if detection_result.get("class") == "none": + # No car detected - send detection: null + detection_dict = None + logger.debug(f"πŸ“€ SENDING 'NONE' - send_detections mode (no car) for camera {camera_id}") + else: + # Car detected in send_detections mode - ALWAYS send empty dict to trigger backend sessionId + # Purpose: Tell backend "car is here, please create sessionId" + detection_dict = {} + logger.info(f"πŸ“€ SENDING EMPTY DETECTION_DICT - send_detections mode, requesting backend to create sessionId (conf={detection_result.get('confidence', 0):.3f}) for camera {camera_id}") + + if backend_session_id: + logger.debug(f"πŸ”„ Camera {camera_id}: Note - sessionId {backend_session_id} exists but still in send_detections mode (transition pending)") + + elif current_mode == "lightweight": + # ═══ SIMPLIFIED LIGHTWEIGHT MODE DETECTION PROCESSING ═══ + if detection_result.get("class") == "none": + # No car detected - this happens when resetting to validation + detection_dict = None # Send detection: null + logger.info(f"🚫 LIGHTWEIGHT - no car detected, sending detection=null") + elif isinstance(detection_result, dict) and ("carBrand" in detection_result or "carModel" in detection_result): + # Check if we're waiting for dual reset condition + current_progression = pipeline_state.get("progression_stage") + if current_progression == "car_waitpayment" and backend_session_id is None: + # In car_waitpayment + sessionId: null - STOP sending cached detection to prevent new session creation + detection_dict = None + logger.info(f"πŸ›‘ LIGHTWEIGHT - in car_waitpayment with sessionId: null, NOT sending cached detection (waiting for dual reset)") + else: + # Normal lightweight mode - send cached detection dict + detection_dict = detection_result + logger.info(f"πŸ’Ύ LIGHTWEIGHT - sending cached detection dict") + else: + logger.warning(f"⚠️ LIGHTWEIGHT - unexpected detection_result type: {type(detection_result)}") + detection_dict = None + + elif detection_result.get("class") == "none": + # Other modes - send null to clear session + detection_dict = None + logger.info(f"πŸ“€ SENDING 'NONE' (detection: null) - Car absent, expecting backend to clear session for camera {camera_id}") + elif detection_result and "carBrand" in detection_result: + # Handle cached detection dict format (fallback for compatibility) + detection_dict = detection_result + logger.info(f"πŸ’Ύ Camera {camera_id}: LIGHTWEIGHT MODE - using detection_result as detection_dict:") + logger.info(f"πŸ’Ύ Camera {camera_id}: - detection_dict: {detection_dict}") else: - # No detection found - highest_confidence_detection = { - "class": "none", - "confidence": 1.0, - "bbox": [0, 0, 0, 0], - "branch_results": {} + # Valid detection - convert to backend format (will be populated by branch processing) + detection_dict = { + "carModel": None, + "carBrand": None, + "carYear": None, + "bodyType": None, + "licensePlateText": None, + "licensePlateConfidence": None } - # Convert detection format to match backend expectations exactly as in worker.md section 4.2 - detection_dict = { - "carModel": None, - "carBrand": None, - "carYear": None, - "bodyType": None, - "licensePlateText": None, - "licensePlateConfidence": None - } - - # Extract and process branch results from parallel classification - branch_results = highest_confidence_detection.get("branch_results", {}) - if branch_results: - logger.debug(f"Processing branch results: {branch_results}") - - # 
Transform branch results into backend-expected detection attributes - for branch_id, branch_data in branch_results.items(): - if isinstance(branch_data, dict): - logger.debug(f"Processing branch {branch_id}: {branch_data}") - - # Map common classification fields to backend-expected names - if "brand" in branch_data: - detection_dict["carBrand"] = branch_data["brand"] - if "body_type" in branch_data: - detection_dict["bodyType"] = branch_data["body_type"] - if "class" in branch_data: - class_name = branch_data["class"] + # Extract and process branch results from parallel classification (only for valid detections, skip cached mode) + if detection_result.get("class") != "none" and "branch_results" in detection_result and not detection_result.get("cached_mode", False): + def process_branch_results(branch_results, depth=0): + """Recursively process branch results including nested branches.""" + if not isinstance(branch_results, dict): + return + + indent = " " * depth + for branch_id, branch_data in branch_results.items(): + if isinstance(branch_data, dict): + logger.debug(f"{indent}Processing branch {branch_id}: {branch_data}") - # Map based on branch/model type - if "brand" in branch_id.lower(): - detection_dict["carBrand"] = class_name - elif "bodytype" in branch_id.lower() or "body" in branch_id.lower(): - detection_dict["bodyType"] = class_name - - logger.info(f"Detection payload after branch processing: {detection_dict}") - else: - logger.debug("No branch results found in detection result") + # Map common classification fields to backend-expected names + if "brand" in branch_data: + detection_dict["carBrand"] = branch_data["brand"] + logger.debug(f"{indent}Mapped carBrand: {branch_data['brand']}") + if "body_type" in branch_data: + detection_dict["bodyType"] = branch_data["body_type"] + logger.debug(f"{indent}Mapped bodyType: {branch_data['body_type']}") + if "class" in branch_data: + class_name = branch_data["class"] + + # Map based on branch/model type + if "brand" in branch_id.lower(): + detection_dict["carBrand"] = class_name + logger.debug(f"{indent}Mapped carBrand from class: {class_name}") + elif "bodytype" in branch_id.lower() or "body" in branch_id.lower(): + detection_dict["bodyType"] = class_name + logger.debug(f"{indent}Mapped bodyType from class: {class_name}") + + # Process nested branch results recursively + if "branch_results" in branch_data: + logger.debug(f"{indent}Processing nested branches in {branch_id}") + process_branch_results(branch_data["branch_results"], depth + 1) + + branch_results = detection_result.get("branch_results", {}) + if branch_results: + logger.debug(f"Processing branch results: {branch_results}") + process_branch_results(branch_results) + logger.info(f"Detection payload after branch processing: {detection_dict}") + + # Cache the detection_dict for lightweight mode (after branch processing completes) + if current_mode == "full_pipeline": + pipeline_state = get_or_init_session_pipeline_state(camera_id) + pipeline_state["cached_detection_dict"] = detection_dict.copy() + logger.info(f"πŸ’Ύ Camera {camera_id}: CACHED DETECTION DICT after branch processing: {detection_dict}") + + else: + logger.debug("No branch results found in detection result") detection_data = { "type": "imageDetection", "subscriptionIdentifier": stream["subscriptionIdentifier"], - "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S.%fZ", time.gmtime()), + "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + # "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime()) + 
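+            # Note: time.strftime() does not reliably support %f (a datetime-only
+            # directive), so the previous "%S.%fZ" format never yielded real
+            # milliseconds; the adjacent commented-out alternative builds a
+            # millisecond suffix manually from time.time().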
f".{int(time.time() * 1000) % 1000:03d}Z", "data": { "detection": detection_dict, "modelId": stream["modelId"], @@ -332,22 +1341,61 @@ async def detect(websocket: WebSocket): } } - # Add session ID if available (generated by pipeline when Car+Frontal detected) - if session_id is not None: - detection_data["sessionId"] = session_id - logger.debug(f"Added session_id to WebSocket response: {session_id}") + # SessionId should NEVER be sent from worker to backend - it's uni-directional (backend -> worker only) + # Backend manages sessionIds independently based on detection content + logger.debug(f"TX message prepared (no sessionId) - detection_dict type: {type(detection_dict)}") - if highest_confidence_detection.get("class") != "none": - confidence = highest_confidence_detection.get("confidence", 0.0) - logger.info(f"Camera {camera_id}: Detected {highest_confidence_detection['class']} with confidence {confidence:.2f} using model {stream['modelName']}") + # Log detection details for different modes + if current_mode == "lightweight": + if detection_result and detection_result.get("class") == "none": + logger.info(f"🚫 Camera {camera_id}: LIGHTWEIGHT - No car detected (resetting to validation)") + elif isinstance(detection_result, dict) and ("carBrand" in detection_result or "carModel" in detection_result): + logger.info(f"πŸ’Ύ Camera {camera_id}: LIGHTWEIGHT - Sending cached detection data") + else: + logger.info(f"πŸͺΆ Camera {camera_id}: LIGHTWEIGHT - Processing detection") + elif detection_result and "class" in detection_result and detection_result.get("class") != "none": + confidence = detection_result.get("confidence", 0.0) + logger.info(f"πŸš— Camera {camera_id}: Detected {detection_result['class']} with confidence {confidence:.2f} using model {stream['modelName']}") + + # Send detection data to backend (session gating handled above in processing logic) + logger.debug(f"πŸ“€ SENDING TO BACKEND for camera {camera_id}: {json.dumps(detection_data, indent=2)}") + try: + ws_logger.info(f"TX -> {json.dumps(detection_data, separators=(',', ':'))}") + await websocket.send_json(detection_data) + logger.debug(f"Sent detection data to client for camera {camera_id}") - # Log session ID if available - if session_id: - logger.debug(f"Detection associated with session ID: {session_id}") + # Cache the detection data for potential resubscriptions (only if not null detection) + if detection_dict is not None and detection_result.get("class") != "none": + cached_detections[camera_id] = detection_data.copy() + logger.debug(f"Cached detection for camera {camera_id}: {detection_dict}") + + # Enhanced caching: Store by session_id for LPR integration + session_id = detection_data.get('sessionId') + if session_id: + session_id_str = str(session_id) + session_detections[session_id_str] = detection_data.copy() + session_to_camera[session_id_str] = camera_id + detection_timestamps[session_id_str] = time.time() + logger.debug(f"πŸ”‘ Cached detection for LPR by session_id {session_id_str}: {camera_id}") + else: + # Don't cache null/none detections - let them reset properly + cached_detections.pop(camera_id, None) + logger.debug(f"Not caching null/none detection for camera {camera_id}") + + except RuntimeError as e: + if "websocket.close" in str(e): + logger.warning(f"WebSocket connection closed - cannot send detection data for camera {camera_id}") + return persistent_data + else: + raise - await websocket.send_json(detection_data) - logger.debug(f"Sent detection data to client for camera {camera_id}") - 
logger.debug(f"Sent this detection data: {detection_data}") + # Log status after sending (no sessionId sent to backend) + if detection_dict is None: + logger.info(f"πŸ“‘ SENT 'none' detection - backend should clear session for camera {camera_id}") + elif detection_dict == {}: + logger.info(f"πŸ“‘ SENT empty detection - backend should create sessionId for camera {camera_id}") + else: + logger.info(f"πŸ“‘ SENT detection data - backend manages sessionId independently for camera {camera_id}") return persistent_data except Exception as e: logger.error(f"Error in handle_detection for camera {camera_id}: {str(e)}", exc_info=True) @@ -366,8 +1414,10 @@ async def detect(websocket: WebSocket): height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = cap.get(cv2.CAP_PROP_FPS) logger.info(f"Camera {camera_id} opened successfully with resolution {width}x{height}, FPS: {fps}") + set_camera_connected(camera_id, True) else: logger.error(f"Camera {camera_id} failed to open initially") + set_camera_connected(camera_id, False, "Failed to open camera initially") while not stop_event.is_set(): try: @@ -382,20 +1432,25 @@ async def detect(websocket: WebSocket): ret, frame = cap.read() if not ret: - logger.warning(f"Connection lost for camera: {camera_id}, retry {retries+1}/{max_retries}") + error_msg = f"Connection lost for camera: {camera_id}, retry {retries+1}/{max_retries}" + logger.warning(error_msg) + set_camera_connected(camera_id, False, error_msg) cap.release() time.sleep(reconnect_interval) retries += 1 if retries > max_retries and max_retries != -1: logger.error(f"Max retries reached for camera: {camera_id}, stopping frame reader") + set_camera_connected(camera_id, False, "Max retries reached") break # Re-open logger.info(f"Attempting to reopen RTSP stream for camera: {camera_id}") cap = cv2.VideoCapture(streams[camera_id]["rtsp_url"]) if not cap.isOpened(): logger.error(f"Failed to reopen RTSP stream for camera: {camera_id}") + set_camera_connected(camera_id, False, "Failed to reopen RTSP stream") continue logger.info(f"Successfully reopened RTSP stream for camera: {camera_id}") + set_camera_connected(camera_id, True) continue # Successfully read a frame @@ -409,6 +1464,7 @@ async def detect(websocket: WebSocket): logger.debug(f"Successfully read frame from camera {camera_id}, shape: {frame.shape}") retries = 0 + set_camera_connected(camera_id, True) # Mark as connected on successful frame read # Overwrite old frame if buffer is full if not buffer.empty(): @@ -424,21 +1480,28 @@ async def detect(websocket: WebSocket): time.sleep(0.01) except cv2.error as e: - logger.error(f"OpenCV error for camera {camera_id}: {e}", exc_info=True) + error_msg = f"OpenCV error for camera {camera_id}: {e}" + logger.error(error_msg, exc_info=True) + set_camera_connected(camera_id, False, error_msg) cap.release() time.sleep(reconnect_interval) retries += 1 if retries > max_retries and max_retries != -1: logger.error(f"Max retries reached after OpenCV error for camera {camera_id}") + set_camera_connected(camera_id, False, "Max retries reached after OpenCV error") break logger.info(f"Attempting to reopen RTSP stream after OpenCV error for camera: {camera_id}") cap = cv2.VideoCapture(streams[camera_id]["rtsp_url"]) if not cap.isOpened(): logger.error(f"Failed to reopen RTSP stream for camera {camera_id} after OpenCV error") + set_camera_connected(camera_id, False, "Failed to reopen after OpenCV error") continue logger.info(f"Successfully reopened RTSP stream after OpenCV error for camera: {camera_id}") + 
set_camera_connected(camera_id, True) except Exception as e: - logger.error(f"Unexpected error for camera {camera_id}: {str(e)}", exc_info=True) + error_msg = f"Unexpected error for camera {camera_id}: {str(e)}" + logger.error(error_msg, exc_info=True) + set_camera_connected(camera_id, False, error_msg) cap.release() break except Exception as e: @@ -451,10 +1514,14 @@ async def detect(websocket: WebSocket): def snapshot_reader(camera_id, snapshot_url, snapshot_interval, buffer, stop_event): """Frame reader that fetches snapshots from HTTP/HTTPS URL at specified intervals""" retries = 0 + consecutive_failures = 0 # Track consecutive failures for backoff logger.info(f"Starting snapshot reader thread for camera {camera_id} from {snapshot_url}") frame_count = 0 last_log_time = time.time() + # Initialize camera state + set_camera_connected(camera_id, True) + try: interval_seconds = snapshot_interval / 1000.0 # Convert milliseconds to seconds logger.info(f"Snapshot interval for camera {camera_id}: {interval_seconds}s") @@ -465,15 +1532,34 @@ async def detect(websocket: WebSocket): frame = fetch_snapshot(snapshot_url) if frame is None: - logger.warning(f"Failed to fetch snapshot for camera: {camera_id}, retry {retries+1}/{max_retries}") + consecutive_failures += 1 + error_msg = f"Failed to fetch snapshot for camera: {camera_id}, consecutive failures: {consecutive_failures}" + logger.warning(error_msg) + set_camera_connected(camera_id, False, error_msg) retries += 1 + + # Check network connectivity with a simple ping-like test + if consecutive_failures % 5 == 1: # Every 5th failure, test connectivity + try: + test_response = requests.get(snapshot_url, timeout=(2, 5), stream=False) + logger.info(f"Camera {camera_id}: Connectivity test result: {test_response.status_code}") + except Exception as test_error: + logger.warning(f"Camera {camera_id}: Connectivity test failed: {test_error}") + if retries > max_retries and max_retries != -1: logger.error(f"Max retries reached for snapshot camera: {camera_id}, stopping reader") + set_camera_connected(camera_id, False, "Max retries reached for snapshot camera") break - time.sleep(min(interval_seconds, reconnect_interval)) + + # Exponential backoff based on consecutive failures + backoff_delay = min(30, max(1, min(2 ** min(consecutive_failures - 1, 6), interval_seconds * 2))) # Start with 1s, max 30s + logger.debug(f"Camera {camera_id}: Backing off for {backoff_delay:.1f}s (consecutive failures: {consecutive_failures})") + if stop_event.wait(backoff_delay): # Use wait with timeout instead of sleep + break # Exit if stop_event is set during backoff continue - # Successfully fetched a frame + # Successfully fetched a frame - reset consecutive failures + consecutive_failures = 0 # Reset backoff on success frame_count += 1 current_time = time.time() # Log frame stats every 5 seconds @@ -484,6 +1570,7 @@ async def detect(websocket: WebSocket): logger.debug(f"Successfully fetched snapshot from camera {camera_id}, shape: {frame.shape}") retries = 0 + set_camera_connected(camera_id, True) # Mark as connected on successful snapshot # Overwrite old frame if buffer is full if not buffer.empty(): @@ -502,12 +1589,21 @@ async def detect(websocket: WebSocket): time.sleep(sleep_time) except Exception as e: - logger.error(f"Unexpected error fetching snapshot for camera {camera_id}: {str(e)}", exc_info=True) + consecutive_failures += 1 + error_msg = f"Unexpected error fetching snapshot for camera {camera_id}: {str(e)}" + logger.error(error_msg, exc_info=True) + 
set_camera_connected(camera_id, False, error_msg) retries += 1 if retries > max_retries and max_retries != -1: logger.error(f"Max retries reached after error for snapshot camera {camera_id}") + set_camera_connected(camera_id, False, "Max retries reached after error") break - time.sleep(min(interval_seconds, reconnect_interval)) + + # Exponential backoff for exceptions too + backoff_delay = min(30, max(1, min(2 ** min(consecutive_failures - 1, 6), interval_seconds * 2))) # Start with 1s, max 30s + logger.debug(f"Camera {camera_id}: Exception backoff for {backoff_delay:.1f}s (consecutive failures: {consecutive_failures})") + if stop_event.wait(backoff_delay): # Use wait with timeout instead of sleep + break # Exit if stop_event is set during backoff except Exception as e: logger.error(f"Error in snapshot_reader thread for camera {camera_id}: {str(e)}", exc_info=True) finally:
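The backoff expression above appears verbatim in both the fetch-failure path and the exception path of snapshot_reader; factoring it into a helper would keep the two in sync. A minimal sketch using the same constants (1 s floor, 30 s ceiling, doubling capped at 2^6 and at twice the snapshot interval); snapshot_backoff_delay is a hypothetical name, not part of this patch:

def snapshot_backoff_delay(consecutive_failures: int, interval_seconds: float) -> float:
    # 1s, 2s, 4s, ... capped at 64s, at twice the snapshot interval, and at 30s overall
    exponential = 2 ** min(consecutive_failures - 1, 6)
    return min(30, max(1, min(exponential, interval_seconds * 2)))

Both call sites could then keep using stop_event.wait(...) with the computed delay unchanged.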
"/adsist-cms-staging/models/bangchak_poc-1756312318569.mpta" + return parsed.path + except Exception as e: + logger.warning(f"Failed to parse model URL {model_url}: {e}") + return model_url + def has_subscription_changed(desired_sub, current_stream): """Check if subscription parameters have changed""" + # Smart model URL comparison - ignore timestamp changes in signed URLs + desired_model_id = extract_model_file_identifier(desired_sub.get("modelUrl")) + current_model_id = extract_model_file_identifier(current_stream.get("modelUrl")) + return ( desired_sub.get("rtspUrl") != current_stream.get("rtsp_url") or desired_sub.get("snapshotUrl") != current_stream.get("snapshot_url") or @@ -562,10 +1688,11 @@ async def detect(websocket: WebSocket): desired_sub.get("cropX2") != current_stream.get("cropX2") or desired_sub.get("cropY2") != current_stream.get("cropY2") or desired_sub.get("modelId") != current_stream.get("modelId") or - desired_sub.get("modelName") != current_stream.get("modelName") + desired_sub.get("modelName") != current_stream.get("modelName") or + desired_model_id != current_model_id ) - async def subscribe_internal(subscription, websocket): + async def subscribe_internal(subscription, websocket, cached_detection=None): """Internal subscription logic extracted from original subscribe handler""" subscriptionIdentifier = subscription.get("subscriptionIdentifier") rtsp_url = subscription.get("rtspUrl") @@ -619,6 +1746,16 @@ async def detect(websocket: WebSocket): if camera_id not in models: models[camera_id] = {} models[camera_id][modelId] = model_tree + + # Start LPR integration threads after first model is loaded (only once) + global lpr_integration_started + if not lpr_integration_started and hasattr(model_tree, 'get') and model_tree.get('redis_client'): + try: + start_lpr_integration() + lpr_integration_started = True + logger.info("πŸš€ LPR integration started after first model load") + except Exception as e: + logger.error(f"❌ Failed to start LPR integration: {e}") # Create stream (same logic as original) if camera_id and (rtsp_url or snapshot_url) and len(streams) < max_streams: @@ -672,21 +1809,49 @@ async def detect(websocket: WebSocket): "buffer": buffer, "thread": thread, "stop_event": stop_event, "modelId": modelId, "modelName": modelName, "subscriptionIdentifier": subscriptionIdentifier, "cropX1": cropX1, "cropY1": cropY1, "cropX2": cropX2, "cropY2": cropY2, - "mode": mode, "camera_url": camera_url, "modelUrl": model_url + "mode": mode, "camera_url": camera_url, "modelUrl": model_url, + # Always store both URLs for comparison consistency + "rtsp_url": rtsp_url, + "snapshot_url": snapshot_url, + "snapshot_interval": snapshot_interval } - if mode == "snapshot": - stream_info["snapshot_url"] = snapshot_url - stream_info["snapshot_interval"] = snapshot_interval - elif mode == "rtsp": - stream_info["rtsp_url"] = rtsp_url + if mode == "rtsp": stream_info["cap"] = shared_stream["cap"] streams[camera_id] = stream_info subscription_to_camera[camera_id] = camera_url logger.info(f"Subscribed to camera {camera_id}") + + # Send initial detection to backend - use cached if available, otherwise "none" + if cached_detection: + # Restore cached detection with updated timestamp (RESUBSCRIPTION STATUS UPDATE) + initial_detection_data = cached_detection.copy() + initial_detection_data["timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + logger.info(f"πŸ“‘ RESUBSCRIPTION: Restoring cached detection for camera {camera_id}") + logger.debug(f"πŸ“‘ RESUBSCRIPTION: Cached detection 
- async def subscribe_internal(subscription, websocket): + async def subscribe_internal(subscription, websocket, cached_detection=None): """Internal subscription logic extracted from original subscribe handler""" subscriptionIdentifier = subscription.get("subscriptionIdentifier") rtsp_url = subscription.get("rtspUrl") @@ -619,6 +1746,16 @@ async def detect(websocket: WebSocket): if camera_id not in models: models[camera_id] = {} models[camera_id][modelId] = model_tree + + # Start LPR integration threads after first model is loaded (only once) + global lpr_integration_started + if not lpr_integration_started and hasattr(model_tree, 'get') and model_tree.get('redis_client'): + try: + start_lpr_integration() + lpr_integration_started = True + logger.info("πŸš€ LPR integration started after first model load") + except Exception as e: + logger.error(f"❌ Failed to start LPR integration: {e}") # Create stream (same logic as original) if camera_id and (rtsp_url or snapshot_url) and len(streams) < max_streams: @@ -672,21 +1809,49 @@ async def detect(websocket: WebSocket): "buffer": buffer, "thread": thread, "stop_event": stop_event, "modelId": modelId, "modelName": modelName, "subscriptionIdentifier": subscriptionIdentifier, "cropX1": cropX1, "cropY1": cropY1, "cropX2": cropX2, "cropY2": cropY2, - "mode": mode, "camera_url": camera_url, "modelUrl": model_url + "mode": mode, "camera_url": camera_url, "modelUrl": model_url, + # Always store both URLs for comparison consistency + "rtsp_url": rtsp_url, + "snapshot_url": snapshot_url, + "snapshot_interval": snapshot_interval } - if mode == "snapshot": - stream_info["snapshot_url"] = snapshot_url - stream_info["snapshot_interval"] = snapshot_interval - elif mode == "rtsp": - stream_info["rtsp_url"] = rtsp_url + if mode == "rtsp": stream_info["cap"] = shared_stream["cap"] streams[camera_id] = stream_info subscription_to_camera[camera_id] = camera_url logger.info(f"Subscribed to camera {camera_id}") + + # Send initial detection to backend - use cached if available, otherwise "none" + if cached_detection: + # Restore cached detection with updated timestamp (RESUBSCRIPTION STATUS UPDATE) + initial_detection_data = cached_detection.copy() + initial_detection_data["timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + logger.info(f"πŸ“‘ RESUBSCRIPTION: Restoring cached detection for camera {camera_id}") + logger.debug(f"πŸ“‘ RESUBSCRIPTION: Cached detection has sessionId: {initial_detection_data.get('sessionId', 'None')}") + else: + # Send "none" detection for new subscriptions + initial_detection_data = { + "type": "imageDetection", + "subscriptionIdentifier": subscriptionIdentifier, + "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "data": { + "detection": None, + "modelId": modelId, + "modelName": modelName + } + } + logger.info(f"πŸ“‘ NEW SUBSCRIPTION: Sending initial 'none' detection for camera {camera_id}") + + ws_logger.info(f"TX -> {json.dumps(initial_detection_data, separators=(',', ':'))}") + await websocket.send_json(initial_detection_data) + logger.debug(f"Initial detection data sent (resubscription={cached_detection is not None}): {initial_detection_data}") + + # This cached detection was just a one-time status update for resubscription + # Normal frame processing will continue independently - async def unsubscribe_internal(subscription_id): + async def unsubscribe_internal(subscription_id, preserve_detection=False): """Internal unsubscription logic""" if subscription_id in streams: stream = streams.pop(subscription_id) @@ -704,7 +1869,14 @@ async def detect(websocket: WebSocket): del camera_streams[camera_url] latest_frames.pop(subscription_id, None) - logger.info(f"Unsubscribed from camera {subscription_id}") + if not preserve_detection: + cached_detections.pop(subscription_id, None) # Clear cached detection only if not preserving + frame_skip_flags.pop(subscription_id, None) # Clear frame skip flag + camera_states.pop(subscription_id, None) # Clear camera state + cached_full_pipeline_results.pop(subscription_id, None) # Clear cached pipeline results + session_pipeline_states.pop(subscription_id, None) # Clear session pipeline state + cleanup_camera_stability(subscription_id) + logger.info(f"Unsubscribed from camera {subscription_id} (preserve_detection={preserve_detection})") async def process_streams(): logger.info("Started processing streams") @@ -801,6 +1973,7 @@ async def detect(websocket: WebSocket): while True: try: msg = await websocket.receive_text() + ws_logger.info(f"RX <- {msg}") logger.debug(f"Received message: {msg}") data = json.loads(msg) msg_type = data.get("type") @@ -864,6 +2037,7 @@ async def detect(websocket: WebSocket): "subscriptionIdentifier": subscriptionIdentifier, "error": f"Failed to download model from {model_url}" } + ws_logger.info(f"TX -> {json.dumps(error_response, separators=(',', ':'))}") await websocket.send_json(error_response) continue model_tree = load_pipeline_from_zip(local_path, extraction_dir) @@ -878,6 +2052,7 @@ async def detect(websocket: WebSocket): "subscriptionIdentifier": subscriptionIdentifier, "error": f"Model file not found: {model_url}" } + ws_logger.info(f"TX -> {json.dumps(error_response, separators=(',', ':'))}") await websocket.send_json(error_response) continue model_tree = load_pipeline_from_zip(model_url, extraction_dir) @@ -895,6 +2070,15 @@ async def detect(websocket: WebSocket): models[camera_id][modelId] = model_tree logger.info(f"Successfully loaded model {modelId} for camera {camera_id}") logger.debug(f"Model extraction directory: {extraction_dir}") + + # Start LPR integration threads after first model is loaded (only once) + if not lpr_integration_started and hasattr(model_tree, 'get') and model_tree.get('redis_client'): + try: + start_lpr_integration() + lpr_integration_started = True + logger.info("πŸš€ LPR integration started after first model load") + except Exception as e: + logger.error(f"❌ Failed to start LPR integration: {e}") if 
camera_id and (rtsp_url or snapshot_url): with streams_lock: # Determine camera URL for shared stream management @@ -1018,8 +2202,12 @@ async def detect(websocket: WebSocket): else: logger.info(f"Shared stream for {camera_url} still has {shared_stream['ref_count']} references") - # Clean up cached frame + # Clean up cached frame and stability tracking latest_frames.pop(camera_id, None) + cached_detections.pop(camera_id, None) # Clear cached detection + frame_skip_flags.pop(camera_id, None) # Clear frame skip flag + camera_states.pop(camera_id, None) # Clear camera state + cleanup_camera_stability(camera_id) logger.info(f"Unsubscribed from camera {camera_id}") # Note: Keep models in memory for potential reuse elif msg_type == "requestState": @@ -1062,14 +2250,120 @@ async def detect(websocket: WebSocket): display_identifier = payload.get("displayIdentifier") session_id = payload.get("sessionId") + # Debug sessionId value types and contents + session_id_type = type(session_id).__name__ + if session_id is None: + logger.info(f"πŸ†” BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId=None (type: {session_id_type})") + logger.info(f"πŸ”„ BACKEND WANTS TO CLEAR SESSION for display {display_identifier}") + elif session_id == "null": + logger.info(f"πŸ†” BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId='null' (type: {session_id_type})") + logger.info(f"πŸ”„ BACKEND SENT STRING 'null' for display {display_identifier}") + elif session_id == "": + logger.info(f"πŸ†” BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId='' (empty string, type: {session_id_type})") + logger.info(f"πŸ”„ BACKEND SENT EMPTY STRING for display {display_identifier}") + else: + logger.info(f"πŸ†” BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId='{session_id}' (type: {session_id_type}, length: {len(str(session_id))})") + logger.info(f"πŸ”„ BACKEND CREATED/UPDATED SESSION: {session_id} for display {display_identifier}") + + logger.debug(f"Full setSessionId payload: {payload}") + logger.debug(f"WebSocket message raw data: {json.dumps(data, indent=2)}") + logger.debug(f"Current active cameras: {list(streams.keys())}") + if display_identifier: # Store session ID for this display - if session_id is None: + if session_id is None or session_id == "null" or session_id == "": + old_session_id = session_ids.get(display_identifier) session_ids.pop(display_identifier, None) - logger.info(f"Cleared session ID for display {display_identifier}") + + if session_id is None: + logger.info(f"🚫 BACKEND ENDED SESSION: Cleared session ID for display {display_identifier} (was: {old_session_id}) - received None") + elif session_id == "null": + logger.info(f"🚫 BACKEND ENDED SESSION: Cleared session ID for display {display_identifier} (was: {old_session_id}) - received string 'null'") + elif session_id == "": + logger.info(f"🚫 BACKEND ENDED SESSION: Cleared session ID for display {display_identifier} (was: {old_session_id}) - received empty string") + + logger.debug(f"Session IDs after clearing: {session_ids}") + + # Reset tracking state for all cameras associated with this display + with streams_lock: + affected_cameras = [] + for camera_id, stream in streams.items(): + if stream["subscriptionIdentifier"].startswith(display_identifier + ";"): + affected_cameras.append(camera_id) + # Import here to avoid circular import + from siwatsystem.pympta import reset_tracking_state + model_id = stream.get("modelId", "unknown") + reset_tracking_state(camera_id, model_id, "backend 
session ended") + + + logger.info(f"Reset tracking for camera {camera_id} (display: {display_identifier})") + logger.debug(f"Reset tracking for {len(affected_cameras)} cameras: {affected_cameras}") else: + old_session_id = session_ids.get(display_identifier) session_ids[display_identifier] = session_id - logger.info(f"Set session ID {session_id} for display {display_identifier}") + logger.info(f"βœ… BACKEND SESSION STARTED: Set session ID {session_id} for display {display_identifier} (previous: {old_session_id})") + logger.debug(f"Session IDs after update: {session_ids}") + logger.debug(f"🎯 CMS Backend created sessionId {session_id} after receiving detection data") + + # πŸ”‘ LPR Integration: Retroactively cache the last detection by this new session_id + session_id_str = str(session_id) + logger.info(f"πŸ”‘ LPR: Attempting to retroactively cache detection for session_id {session_id_str}") + + # Find cameras associated with this display + display_cameras = [] + with streams_lock: + for camera_id, stream in streams.items(): + if stream["subscriptionIdentifier"].startswith(display_identifier + ";"): + display_cameras.append(camera_id) + + logger.debug(f"πŸ” Found {len(display_cameras)} cameras for display {display_identifier}: {display_cameras}") + + # Cache the most recent detection for each camera by the new session_id + cached_count = 0 + for camera_id in display_cameras: + if camera_id in cached_detections: + detection_data = cached_detections[camera_id].copy() + + # Add sessionId to the detection data + detection_data['sessionId'] = session_id + + # Cache by session_id for LPR lookup + session_detections[session_id_str] = detection_data + session_to_camera[session_id_str] = camera_id + detection_timestamps[session_id_str] = time.time() + cached_count += 1 + + logger.info(f"βœ… LPR: Cached detection for session_id {session_id_str} -> camera {camera_id}") + logger.debug(f"πŸ” Detection data: {detection_data.get('data', {}).get('detection', {})}") + else: + logger.debug(f"⚠️ No cached detection available for camera {camera_id}") + + if cached_count > 0: + logger.info(f"πŸŽ‰ LPR: Successfully cached {cached_count} detection(s) for session_id {session_id_str}") + logger.info(f"πŸ“Š Total LPR sessions now cached: {len(session_detections)}") + else: + logger.warning(f"⚠️ LPR: No detections could be cached for session_id {session_id_str}") + logger.warning(f" Display cameras: {display_cameras}") + logger.warning(f" Available cached detections: {list(cached_detections.keys())}") + + # Clear waiting state for cameras associated with this display + with streams_lock: + affected_cameras = [] + for camera_id, stream in streams.items(): + if stream["subscriptionIdentifier"].startswith(display_identifier + ";"): + affected_cameras.append(camera_id) + from siwatsystem.pympta import get_camera_stability_data + model_id = stream.get("modelId", "unknown") + stability_data = get_camera_stability_data(camera_id, model_id) + session_state = stability_data["session_state"] + if session_state.get("waiting_for_backend_session", False): + session_state["waiting_for_backend_session"] = False + session_state["wait_start_time"] = 0.0 + logger.info(f"πŸš€ PIPELINE UNBLOCKED: Backend sessionId {session_id} received - camera {camera_id} can proceed with database operations") + logger.debug(f"πŸ“‹ Camera {camera_id}: SessionId {session_id} now available for future database operations") + logger.debug(f"Updated session state for {len(affected_cameras)} cameras: {affected_cameras}") + else: + logger.warning(f"🚨 Invalid 
setSessionId message: missing displayIdentifier in payload") elif msg_type == "patchSession": session_id = data.get("sessionId") @@ -1084,9 +2378,62 @@ async def detect(websocket: WebSocket): "message": "Session patch acknowledged" } } + ws_logger.info(f"TX -> {json.dumps(response, separators=(',', ':'))}") await websocket.send_json(response) logger.info(f"Acknowledged patch for session {session_id}") + elif msg_type == "setProgressionStage": + payload = data.get("payload", {}) + display_identifier = payload.get("displayIdentifier") + progression_stage = payload.get("progressionStage") + + logger.info(f"🏁 PROGRESSION STAGE RECEIVED: displayId={display_identifier}, stage={progression_stage}") + + if display_identifier: + # Find all cameras associated with this display + with streams_lock: + affected_cameras = [] + for camera_id, stream in streams.items(): + if stream["subscriptionIdentifier"].startswith(display_identifier + ";"): + affected_cameras.append(camera_id) + + logger.debug(f"🎯 Found {len(affected_cameras)} cameras for display {display_identifier}: {affected_cameras}") + + # Handle different progression stages + for camera_id in affected_cameras: + pipeline_state = get_or_init_session_pipeline_state(camera_id) + current_mode = pipeline_state.get("mode", "validation_detecting") + + if progression_stage == "car_fueling": + # Situation 2: Stop YOLO inference, continue sending cached detection dict + if current_mode == "lightweight": + pipeline_state["yolo_inference_enabled"] = False + pipeline_state["progression_stage"] = "car_fueling" + logger.info(f"⏸️ Camera {camera_id}: YOLO inference DISABLED for car_fueling stage (still sending cached detection dict)") + else: + logger.debug(f"πŸ“Š Camera {camera_id}: car_fueling received but not in lightweight mode (mode: {current_mode})") + + elif progression_stage == "car_waitpayment": + # Resume YOLO inference for absence counter + pipeline_state["yolo_inference_enabled"] = True + pipeline_state["progression_stage"] = "car_waitpayment" + logger.info(f"▢️ Camera {camera_id}: YOLO inference RE-ENABLED for car_waitpayment stage") + + elif progression_stage == "welcome": + # Ignore welcome messages during car_waitpayment as per requirement + current_progression = pipeline_state.get("progression_stage") + if current_progression == "car_waitpayment": + logger.info(f"🚫 Camera {camera_id}: IGNORING welcome stage (currently in car_waitpayment)") + else: + pipeline_state["progression_stage"] = "welcome" + logger.info(f"πŸŽ‰ Camera {camera_id}: Progression stage set to welcome") + + elif progression_stage in ["car_wait_staff"]: + pipeline_state["progression_stage"] = progression_stage + logger.info(f"πŸ“‹ Camera {camera_id}: Progression stage set to {progression_stage}") + else: + logger.warning(f"🚨 Invalid setProgressionStage message: missing displayIdentifier in payload") + else: logger.error(f"Unknown message type: {msg_type}") except json.JSONDecodeError: @@ -1128,5 +2475,14 @@ async def detect(websocket: WebSocket): with models_lock: models.clear() latest_frames.clear() + cached_detections.clear() + frame_skip_flags.clear() + camera_states.clear() + cached_full_pipeline_results.clear() + session_pipeline_states.clear() session_ids.clear() + # Clean up LPR integration caches + session_detections.clear() + session_to_camera.clear() + detection_timestamps.clear() logger.info("WebSocket connection closed") diff --git a/debug/test_camera_indices.py b/debug/test_camera_indices.py new file mode 100644 index 0000000..f88bc87 --- /dev/null +++ 
b/debug/test_camera_indices.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +""" +Test script to check available camera indices +""" + +import cv2 +import logging +import sys +import subprocess + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s" +) +logger = logging.getLogger("camera_index_test") + +def test_camera_index(index): + """Test if a camera index is available""" + try: + cap = cv2.VideoCapture(index) + if cap.isOpened(): + ret, frame = cap.read() + if ret and frame is not None: + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) + + cap.release() + return True, f"{width}x{height} @ {fps}fps" + else: + cap.release() + return False, "Can open but cannot read frames" + else: + cap.release() + return False, "Cannot open camera" + except Exception as e: + return False, f"Error: {str(e)}" + +def get_windows_cameras_ffmpeg(): + """Get available cameras on Windows using FFmpeg""" + try: + result = subprocess.run(['ffmpeg', '-f', 'dshow', '-list_devices', 'true', '-i', 'dummy'], + capture_output=True, text=True, timeout=10, encoding='utf-8', errors='ignore') + output = result.stderr + + lines = output.split('\n') + video_devices = [] + + # Parse the output - look for lines with (video) that contain device names in quotes + for line in lines: + if '[dshow @' in line and '(video)' in line and '"' in line: + # Extract device name between first pair of quotes + start = line.find('"') + 1 + end = line.find('"', start) + if start > 0 and end > start: + device_name = line[start:end] + video_devices.append(device_name) + + logger.info(f"FFmpeg detected video devices: {video_devices}") + return video_devices + except Exception as e: + logger.error(f"Failed to get Windows camera names: {e}") + return [] + +def main(): + logger.info("=== Camera Index Test ===") + + # Check FFmpeg availability for Windows device detection + ffmpeg_available = False + try: + result = subprocess.run(['ffmpeg', '-version'], capture_output=True, text=True, timeout=5) + if result.returncode == 0: + ffmpeg_available = True + logger.info("FFmpeg is available") + except: + logger.info("FFmpeg not available") + + # Get Windows camera names if possible + if sys.platform.startswith('win') and ffmpeg_available: + logger.info("\n=== Windows Camera Devices (FFmpeg) ===") + cameras = get_windows_cameras_ffmpeg() + if cameras: + for i, camera in enumerate(cameras): + logger.info(f"Device {i}: {camera}") + else: + logger.info("No cameras detected via FFmpeg") + + # Test camera indices 0-9 + logger.info("\n=== Testing Camera Indices ===") + available_cameras = [] + + for index in range(10): + logger.info(f"Testing camera index {index}...") + is_available, info = test_camera_index(index) + + if is_available: + logger.info(f"βœ“ Camera {index}: AVAILABLE - {info}") + available_cameras.append(index) + else: + logger.info(f"βœ— Camera {index}: NOT AVAILABLE - {info}") + + # Summary + logger.info("\n=== Summary ===") + if available_cameras: + logger.info(f"Available camera indices: {available_cameras}") + logger.info(f"Default camera index to use: {available_cameras[0]}") + + # Test the first available camera more thoroughly + logger.info(f"\n=== Detailed Test for Camera {available_cameras[0]} ===") + cap = cv2.VideoCapture(available_cameras[0]) + if cap.isOpened(): + # Get properties + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + 
fps = cap.get(cv2.CAP_PROP_FPS) + backend = cap.getBackendName() + + logger.info(f"Resolution: {width}x{height}") + logger.info(f"FPS: {fps}") + logger.info(f"Backend: {backend}") + + # Test frame capture + ret, frame = cap.read() + if ret and frame is not None: + logger.info(f"Frame capture: SUCCESS") + logger.info(f"Frame shape: {frame.shape}") + logger.info(f"Frame dtype: {frame.dtype}") + else: + logger.info(f"Frame capture: FAILED") + + cap.release() + else: + logger.error("No cameras available!") + logger.info("Possible solutions:") + logger.info("1. Check if camera is connected and not used by another application") + logger.info("2. Check camera permissions") + logger.info("3. Try different camera indices") + logger.info("4. Install camera drivers") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/requirements.base.txt b/requirements.base.txt index af22160..297c86b 100644 --- a/requirements.base.txt +++ b/requirements.base.txt @@ -1,7 +1,13 @@ -torch -torchvision -ultralytics -opencv-python -scipy -filterpy -psycopg2-binary \ No newline at end of file +torch>=1.12.0,<2.1.0 +torchvision>=0.13.0,<0.16.0 +ultralytics>=8.3.0 +opencv-python>=4.6.0,<4.9.0 +scipy>=1.9.0,<1.12.0 +filterpy>=1.4.0,<1.5.0 +psycopg2-binary>=2.9.0,<2.10.0 +easydict +loguru +pyzmq +gitpython +gdown +lap \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 6eaf131..baddeb5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ -fastapi +fastapi[standard] uvicorn websockets -fastapi[standard] redis urllib3<2.0.0 \ No newline at end of file diff --git a/siwatsystem/database.py b/siwatsystem/database.py index 6340986..5bcbf1d 100644 --- a/siwatsystem/database.py +++ b/siwatsystem/database.py @@ -80,37 +80,50 @@ class DatabaseManager: try: cur = self.connection.cursor() - # Build the UPDATE query dynamically + # Build the INSERT and UPDATE query dynamically + insert_placeholders = [] + insert_values = [key_value] # Start with key_value + set_clauses = [] - values = [] + update_values = [] for field, value in fields.items(): if value == "NOW()": + # Special handling for NOW() + insert_placeholders.append("NOW()") set_clauses.append(f"{field} = NOW()") else: + insert_placeholders.append("%s") + insert_values.append(value) set_clauses.append(f"{field} = %s") - values.append(value) + update_values.append(value) # Add schema prefix if table doesn't already have it full_table_name = table if '.' 
in table else f"gas_station_1.{table}" + # Build the complete query query = f""" INSERT INTO {full_table_name} ({key_field}, {', '.join(fields.keys())}) - VALUES (%s, {', '.join(['%s'] * len(fields))}) + VALUES (%s, {', '.join(insert_placeholders)}) ON CONFLICT ({key_field}) DO UPDATE SET {', '.join(set_clauses)} """ - # Add key_value to the beginning of values list - all_values = [key_value] + list(fields.values()) + values + # Combine values for the query: insert_values + update_values + all_values = insert_values + update_values + + logger.debug(f"SQL Query: {query}") + logger.debug(f"Values: {all_values}") cur.execute(query, all_values) self.connection.commit() cur.close() - logger.info(f"Updated {table} for {key_field}={key_value}") + logger.info(f"βœ… Updated {table} for {key_field}={key_value} with fields: {fields}") return True except Exception as e: - logger.error(f"Failed to execute update on {table}: {e}") + logger.error(f"❌ Failed to execute update on {table}: {e}") + logger.debug(f"Query: {query if 'query' in locals() else 'Query not built'}") + logger.debug(f"Values: {all_values if 'all_values' in locals() else 'Values not prepared'}") if self.connection: self.connection.rollback() return False
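The mismatch this change fixes: the old code emitted a %s placeholder for every field on the INSERT side, so a field whose value was "NOW()" got bound as the literal string instead of the SQL function. With the separated insert/update value lists, a call such as the following (table, key, and fields are made up for illustration) lines up placeholders and values on both sides of the upsert:

# execute_update("car_frontal_info", "session_id", "abc-123",
#                {"car_brand": "Toyota", "updated_at": "NOW()"})
# builds:
#   INSERT INTO gas_station_1.car_frontal_info (session_id, car_brand, updated_at)
#   VALUES (%s, %s, NOW())
#   ON CONFLICT (session_id) DO UPDATE SET car_brand = %s, updated_at = NOW()
# with all_values = ["abc-123", "Toyota", "Toyota"]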
diff --git a/siwatsystem/pympta.py index fd1485d..52fbfa6 100644 --- a/siwatsystem/pympta.py +++ b/siwatsystem/pympta.py @@ -13,10 +13,18 @@ import concurrent.futures from ultralytics import YOLO from urllib.parse import urlparse from .database import DatabaseManager +from datetime import datetime # Create a logger specifically for this module logger = logging.getLogger("detector_worker.pympta") +# Global camera-aware stability tracking +# Structure: {camera_id: {model_id: {"track_stability_counters": {track_id: count}, "stable_tracks": set(), "session_state": {...}}}} +_camera_stability_tracking = {} + +# Session timeout configuration (waiting for backend sessionId) +_session_timeout_seconds = 15 + def validate_redis_config(redis_config: dict) -> bool: """Validate Redis configuration parameters.""" required_fields = ["host", "port"] @@ -78,7 +86,7 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manage logger.info(f"Loading model for node {node_config['modelId']} from {model_path}") model = YOLO(model_path) if torch.cuda.is_available(): - logger.info(f"CUDA available. Moving model {node_config['modelId']} to GPU") + logger.info(f"CUDA available. Moving model {node_config['modelId']} to GPU VRAM") model.to("cuda") else: logger.info(f"CUDA not available. Using CPU for model {node_config['modelId']}") @@ -92,19 +100,27 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manage if name in trigger_classes] logger.debug(f"Converted trigger classes to indices: {trigger_class_indices}") + # Extract stability threshold from main pipeline config (not tracking config) + tracking_config = node_config.get("tracking", {"enabled": True, "reidConfigPath": "botsort.yaml"}) + stability_threshold = node_config.get("stabilityThreshold", 4) # Read from main config, default to 4 + node = { "modelId": node_config["modelId"], "modelFile": node_config["modelFile"], "triggerClasses": trigger_classes, "triggerClassIndices": trigger_class_indices, + "classMapping": node_config.get("classMapping", {}), "crop": node_config.get("crop", False), "cropClass": node_config.get("cropClass"), "minConfidence": node_config.get("minConfidence", None), + "minBboxAreaRatio": node_config.get("minBboxAreaRatio", 0.0), "multiClass": node_config.get("multiClass", False), "expectedClasses": node_config.get("expectedClasses", []), "parallel": node_config.get("parallel", False), "actions": node_config.get("actions", []), "parallelActions": node_config.get("parallelActions", []), + "tracking": tracking_config, + "stabilityThreshold": stability_threshold, "model": model, "branches": [], "redis_client": redis_client, @@ -437,6 +453,7 @@ def execute_postgresql_update_combined(node, action, detection_result, branch_re key_value = key_value_template.format(**action_context) logger.info(f"Executing database update: table={table}, {key_field}={key_value}") + logger.debug(f"Available branch results: {list(branch_results.keys())}") # Process field mappings mapped_fields = {} @@ -445,26 +462,38 @@ def execute_postgresql_update_combined(node, action, detection_result, branch_re mapped_value = resolve_field_mapping(value_template, branch_results, action_context) if mapped_value is not None: mapped_fields[db_field] = mapped_value - logger.debug(f"Mapped field: {db_field} = {mapped_value}") + logger.info(f"Mapped field: {db_field} = {mapped_value}") else: logger.warning(f"Could not resolve field mapping for {db_field}: {value_template}") + logger.debug(f"Available branch results: {branch_results}") except Exception as e: logger.error(f"Error mapping field {db_field} with template '{value_template}': {e}") + import traceback + logger.debug(f"Field mapping error traceback: {traceback.format_exc()}") if not mapped_fields: logger.warning("No fields mapped successfully, skipping database update") + logger.debug(f"Branch results available: {branch_results}") + logger.debug(f"Field templates: {fields}") return + # Add updated_at field automatically + mapped_fields["updated_at"] = "NOW()" + # Execute the database update + logger.info(f"Attempting database update with fields: {mapped_fields}") success = node["db_manager"].execute_update(table, key_field, key_value, mapped_fields) if success: - logger.info(f"Successfully updated database: {table} with {len(mapped_fields)} fields") + logger.info(f"βœ… Successfully updated database: {table} with {len(mapped_fields)} fields") + logger.info(f"Updated fields: {mapped_fields}") else: - logger.error(f"Failed to update database: {table}") + logger.error(f"❌ Failed to update database: {table}") + logger.error(f"Attempted update with: {key_field}={key_value}, fields={mapped_fields}") except KeyError as e: logger.error(f"Missing required field in postgresql_update_combined action: {e}") + logger.debug(f"Action config: {action}") except Exception 
as e: logger.error(f"Error in postgresql_update_combined action: {e}") import traceback @@ -473,28 +502,68 @@ def execute_postgresql_update_combined(node, action, detection_result, branch_re def resolve_field_mapping(value_template, branch_results, action_context): """Resolve field mapping templates like {car_brand_cls_v1.brand}.""" try: + logger.debug(f"Resolving field mapping: '{value_template}'") + logger.debug(f"Available branch results: {list(branch_results.keys())}") + # Handle simple context variables first (non-branch references) if not '.' in value_template: - return value_template.format(**action_context) + result = value_template.format(**action_context) + logger.debug(f"Simple template resolved: '{value_template}' -> '{result}'") + return result # Handle branch result references like {model_id.field} import re branch_refs = re.findall(r'\{([^}]+\.[^}]+)\}', value_template) + logger.debug(f"Found branch references: {branch_refs}") resolved_template = value_template for ref in branch_refs: try: model_id, field_name = ref.split('.', 1) + logger.debug(f"Processing branch reference: model_id='{model_id}', field_name='{field_name}'") if model_id in branch_results: branch_data = branch_results[model_id] + logger.debug(f"Branch '{model_id}' data: {branch_data}") + if field_name in branch_data: field_value = branch_data[field_name] resolved_template = resolved_template.replace(f'{{{ref}}}', str(field_value)) - logger.debug(f"Resolved {ref} to {field_value}") + logger.info(f"βœ… Resolved {ref} to '{field_value}'") else: - logger.warning(f"Field '{field_name}' not found in branch '{model_id}' results. Available fields: {list(branch_data.keys())}") - return None + logger.warning(f"Field '{field_name}' not found in branch '{model_id}' results.") + logger.debug(f"Available fields in '{model_id}': {list(branch_data.keys())}") + + # Try alternative field names based on the class result and model type + # (this path is only reached when field_name is absent, so go straight to the fallbacks) + if isinstance(branch_data, dict): + fallback_value = None + + # Try the generic 'class' field first + if 'class' in branch_data: + fallback_value = branch_data['class'] + logger.info(f"Using 'class' field as fallback for '{field_name}': '{fallback_value}'") + # For brand models, also check if the class name exists as a key + elif field_name == 'brand' and branch_data.get('class') in branch_data: + fallback_value = branch_data[branch_data['class']] + logger.info(f"Found brand value using class name as key: '{fallback_value}'") + # For body_type models, also check if the class name exists as a key + elif field_name == 'body_type' and branch_data.get('class') in branch_data: + fallback_value = branch_data[branch_data['class']] + logger.info(f"Found body_type value using class name as key: '{fallback_value}'") + + if fallback_value is not None: + resolved_template = resolved_template.replace(f'{{{ref}}}', str(fallback_value)) + logger.info(f"βœ… Resolved {ref} to '{fallback_value}' (using fallback)") + else: + logger.error(f"No suitable field found for '{field_name}' in branch '{model_id}'") + logger.debug(f"Branch data structure: {branch_data}") + return None + else: + logger.error(f"Branch data for '{model_id}' is not a dictionary: {type(branch_data)}") + return None else: logger.warning(f"Branch '{model_id}' not found in results. 
Available branches: {list(branch_results.keys())}") return None @@ -505,6 +574,7 @@ def resolve_field_mapping(value_template, branch_results, action_context): # Format any remaining simple variables try: final_value = resolved_template.format(**action_context) + logger.debug(f"Final resolved value: '{final_value}'") return final_value except KeyError as e: logger.warning(f"Could not resolve context variable in template: {e}") @@ -512,8 +582,559 @@ def resolve_field_mapping(value_template, branch_results, action_context): except Exception as e: logger.error(f"Error resolving field mapping '{value_template}': {e}") + import traceback + logger.debug(f"Field mapping error traceback: {traceback.format_exc()}") return None +def run_detection_with_tracking(frame, node, context=None): + """ + Structured function for running YOLO detection with BoT-SORT tracking. + Now includes track ID-based validation requiring N consecutive frames of the same track ID. + + Args: + frame: Input frame/image + node: Pipeline node configuration with model and settings + context: Optional context information (camera info, session data, etc.) + + Returns: + tuple: (all_detections, regions_dict, track_validation_result) where: + - all_detections: List of all detection objects + - regions_dict: Dict mapping class names to highest confidence detections + - track_validation_result: Dict with validation status and stable tracks + + Configuration options in node: + - model: YOLO model instance + - triggerClassIndices: List of class indices to detect (None for all classes) + - minConfidence: Minimum confidence threshold + - multiClass: Whether to enable multi-class detection mode + - expectedClasses: List of expected class names for multi-class validation + - tracking: Dict with tracking configuration + - enabled: Boolean to enable/disable tracking + - method: Tracking method ("botsort") + - reidConfig: Path to ReID config file + - stabilityThreshold: Number of consecutive frames required for validation + """ + try: + # Extract tracking configuration + tracking_config = node.get("tracking", {}) + tracking_enabled = tracking_config.get("enabled", True) + reid_config_path = tracking_config.get("reidConfig", tracking_config.get("reidConfigPath", "botsort.yaml")) + stability_threshold = tracking_config.get("stabilityThreshold", node.get("stabilityThreshold", 4)) + + # Check if we need to reset tracker after cooldown + camera_id = context.get("camera_id", "unknown") if context else "unknown" + model_id = node.get("modelId", "unknown") + stability_data = get_camera_stability_data(camera_id, model_id) + session_state = stability_data["session_state"] + + if session_state.get("reset_tracker_on_resume", False): + # Reset YOLO tracker to get fresh track IDs + if hasattr(node["model"], 'trackers') and node["model"].trackers: + node["model"].trackers.clear() # Clear tracker state + logger.info(f"Camera {camera_id}: πŸ”„ Reset YOLO tracker - new cars will get fresh track IDs") + session_state["reset_tracker_on_resume"] = False # Clear the flag + + # Get tracking zone from runtime context (camera-specific) + tracking_zone = context.get("trackingZone", []) if context else [] + + # Prepare class filtering + trigger_class_indices = node.get("triggerClassIndices") + class_filter = {"classes": trigger_class_indices} if trigger_class_indices else {} + + logger.debug(f"Running detection for {node['modelId']} - tracking: {tracking_enabled}, stability_threshold: {stability_threshold}, classes: {node.get('triggerClasses', 'all')}") + + if 
tracking_enabled and tracking_zone: + # Use tracking with zone validation + logger.debug(f"Using tracking with ReID config: {reid_config_path}") + res = node["model"].track( + frame, + stream=False, + persist=True, + tracker=reid_config_path, + **class_filter + )[0] + elif tracking_enabled: + # Use tracking without zone restriction + logger.debug("Using tracking without zone restriction") + res = node["model"].track( + frame, + stream=False, + persist=True, + **class_filter + )[0] + else: + # Use detection only (no tracking) + logger.debug("Using detection only (tracking disabled)") + res = node["model"].predict( + frame, + stream=False, + **class_filter + )[0] + + # Process detection results + candidate_detections = [] + min_confidence = node.get("minConfidence", 0.0) + + if res.boxes is None or len(res.boxes) == 0: + logger.debug(f"🚫 Camera {camera_id}: YOLO returned no detections") + + # Update stability tracking even when no detection (to reset counters) + camera_id = context.get("camera_id", "unknown") if context else "unknown" + model_id = node.get("modelId", "unknown") + track_validation_result = update_single_track_stability(node, None, camera_id, frame.shape, stability_threshold, context) + + # Store validation state in context for pipeline decisions + if context is not None: + context["track_validation_result"] = track_validation_result + + return [], {}, track_validation_result + + logger.debug(f"πŸ” Camera {camera_id}: YOLO detected {len(res.boxes)} raw objects - processing with tracking...") + + # First pass: collect all valid detections + logger.debug(f"πŸ” Camera {camera_id}: === DETECTION ANALYSIS ===") + for i, box in enumerate(res.boxes): + # Extract detection data + conf = float(box.cpu().conf[0]) + cls_id = int(box.cpu().cls[0]) + class_name = node["model"].names[cls_id] + + # Extract bounding box + xy = box.cpu().xyxy[0] + x1, y1, x2, y2 = map(int, xy) + bbox = (x1, y1, x2, y2) + + # Extract tracking ID if available + track_id = None + if hasattr(box, "id") and box.id is not None: + track_id = int(box.id.item()) + + logger.debug(f"πŸ” Camera {camera_id}: Detection {i+1}: class='{class_name}' conf={conf:.3f} track_id={track_id} bbox={bbox}") + + # Apply confidence filtering + if conf < min_confidence: + logger.debug(f"❌ Camera {camera_id}: Detection {i+1} REJECTED - confidence {conf:.3f} < {min_confidence}") + continue + + # Apply tracking zone validation if enabled + if tracking_enabled and tracking_zone: + bbox_center_x = (x1 + x2) // 2 + bbox_center_y = (y1 + y2) // 2 + + # Check if detection center is within tracking zone + if not _point_in_polygon((bbox_center_x, bbox_center_y), tracking_zone): + logger.debug(f"❌ Camera {camera_id}: Detection {i+1} REJECTED - outside tracking zone") + continue + + # Create detection object + detection = { + "class": class_name, + "confidence": conf, + "id": track_id, + "bbox": bbox, + "class_id": cls_id + } + + candidate_detections.append(detection) + logger.debug(f"βœ… Camera {camera_id}: Detection {i+1} ACCEPTED as candidate: {class_name} (conf={conf:.3f}, track_id={track_id})") + + # Second pass: select only the highest confidence detection overall + if not candidate_detections: + logger.debug(f"🚫 Camera {camera_id}: No valid candidates after filtering - no car will be tracked") + + # Update stability tracking even when no detection (to reset counters) + camera_id = context.get("camera_id", "unknown") if context else "unknown" + model_id = node.get("modelId", "unknown") + track_validation_result = 
update_single_track_stability(node, None, camera_id, frame.shape, stability_threshold, context) + + # Store validation state in context for pipeline decisions + if context is not None: + context["track_validation_result"] = track_validation_result + + return [], {}, track_validation_result + + logger.debug(f"πŸ† Camera {camera_id}: === SELECTING HIGHEST CONFIDENCE CAR ===") + for i, detection in enumerate(candidate_detections): + logger.debug(f"πŸ† Camera {camera_id}: Candidate {i+1}: {detection['class']} conf={detection['confidence']:.3f} track_id={detection['id']}") + + # Find the single highest confidence detection across all detected classes + best_detection = max(candidate_detections, key=lambda x: x["confidence"]) + original_class = best_detection["class"] + track_id = best_detection["id"] + + logger.info(f"🎯 Camera {camera_id}: SELECTED WINNER: {original_class} (conf={best_detection['confidence']:.3f}, track_id={track_id}, bbox={best_detection['bbox']})") + + # Show which cars were NOT selected + for detection in candidate_detections: + if detection != best_detection: + logger.debug(f"🚫 Camera {camera_id}: NOT SELECTED: {detection['class']} (conf={detection['confidence']:.3f}, track_id={detection['id']}) - lower confidence") + + # Apply class mapping if configured + mapped_class = original_class + class_mapping = node.get("classMapping", {}) + if original_class in class_mapping: + mapped_class = class_mapping[original_class] + logger.info(f"Class mapping applied: {original_class} β†’ {mapped_class}") + # Update the detection object with mapped class + best_detection["class"] = mapped_class + best_detection["original_class"] = original_class # Keep original for reference + + # Keep only the single best detection with mapped class + all_detections = [best_detection] + regions_dict = { + mapped_class: { + "bbox": best_detection["bbox"], + "confidence": best_detection["confidence"], + "detection": best_detection, + "track_id": track_id + } + } + + # Multi-class validation + if node.get("multiClass", False) and node.get("expectedClasses"): + expected_classes = node["expectedClasses"] + detected_classes = list(regions_dict.keys()) + + logger.debug(f"Multi-class validation: expected={expected_classes}, detected={detected_classes}") + + # Check for required classes (flexible - at least one must match) + matching_classes = [cls for cls in expected_classes if cls in detected_classes] + if not matching_classes: + logger.warning(f"Multi-class validation failed: no expected classes detected") + return [], {} + + logger.info(f"Multi-class validation passed: {matching_classes} detected") + + logger.info(f"βœ… Camera {camera_id}: DETECTION COMPLETE - tracking single car: track_id={track_id}, conf={best_detection['confidence']:.3f}") + logger.debug(f"πŸ“Š Camera {camera_id}: Detection summary: {len(res.boxes)} raw β†’ {len(candidate_detections)} candidates β†’ 1 selected") + + # Update track-based stability tracking for the single selected car + camera_id = context.get("camera_id", "unknown") if context else "unknown" + model_id = node.get("modelId", "unknown") + + # Update stability tracking for the single best detection + track_validation_result = update_single_track_stability(node, best_detection, camera_id, frame.shape, stability_threshold, context) + + # Store validation state in context for pipeline decisions + if context is not None: + context["track_validation_result"] = track_validation_result + + return all_detections, regions_dict, track_validation_result + + except Exception as e: + 
logger.error(f"Error in detection_with_tracking for {node.get('modelId', 'unknown')}: {e}") + logger.debug(f"Detection error traceback: {traceback.format_exc()}") + return [], {}, {"validation_complete": False, "stable_tracks": [], "current_tracks": []} + +def _point_in_polygon(point, polygon): + """Check if a point is inside a polygon using ray casting algorithm.""" + if not polygon or len(polygon) < 3: + return True # No zone restriction if invalid polygon + + x, y = point + n = len(polygon) + inside = False + + p1x, p1y = polygon[0] + for i in range(1, n + 1): + p2x, p2y = polygon[i % n] + if y > min(p1y, p2y): + if y <= max(p1y, p2y): + if x <= max(p1x, p2x): + if p1y != p2y: + xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x + if p1x == p2x or x <= xinters: + inside = not inside + p1x, p1y = p2x, p2y + + return inside + +def get_camera_stability_data(camera_id, model_id): + """Get or create stability tracking data for a specific camera and model.""" + global _camera_stability_tracking + + if camera_id not in _camera_stability_tracking: + _camera_stability_tracking[camera_id] = {} + + if model_id not in _camera_stability_tracking[camera_id]: + logger.warning(f"πŸ”„ Camera {camera_id}: Creating NEW stability data for {model_id} - this will reset any cooldown!") + _camera_stability_tracking[camera_id][model_id] = { + "track_stability_counters": {}, # Track ID -> consecutive frame count + "stable_tracks": set(), # Set of track IDs that have reached stability threshold + "session_state": { + "active": True, + "waiting_for_backend_session": False, + "wait_start_time": 0.0, + "reset_tracker_on_resume": False + } + # Removed obsolete occupancy_state - app.py handles all mode transitions now + } + + return _camera_stability_tracking[camera_id][model_id] + +def reset_camera_stability_tracking(camera_id, model_id): + """Reset all stability tracking data for a specific camera and model.""" + if camera_id in _camera_stability_tracking and model_id in _camera_stability_tracking[camera_id]: + stability_data = _camera_stability_tracking[camera_id][model_id] + + # Clear all tracking data + track_counters = stability_data["track_stability_counters"] + stable_tracks = stability_data["stable_tracks"] + session_state = stability_data["session_state"] + + old_counters = dict(track_counters) + old_stable = list(stable_tracks) + + track_counters.clear() + stable_tracks.clear() + + # IMPORTANT: Set flag to reset YOLO tracker on next detection run + # This will ensure track IDs start fresh (1, 2, 3...) 
+ +def get_camera_stability_data(camera_id, model_id): + """Get or create stability tracking data for a specific camera and model.""" + global _camera_stability_tracking + + if camera_id not in _camera_stability_tracking: + _camera_stability_tracking[camera_id] = {} + + if model_id not in _camera_stability_tracking[camera_id]: + logger.warning(f"πŸ”„ Camera {camera_id}: Creating NEW stability data for {model_id} - this will reset any cooldown!") + _camera_stability_tracking[camera_id][model_id] = { + "track_stability_counters": {}, # Track ID -> consecutive frame count + "stable_tracks": set(), # Set of track IDs that have reached stability threshold + "session_state": { + "active": True, + "waiting_for_backend_session": False, + "wait_start_time": 0.0, + "reset_tracker_on_resume": False + } + # Removed obsolete occupancy_state - app.py handles all mode transitions now + } + + return _camera_stability_tracking[camera_id][model_id] + +def reset_camera_stability_tracking(camera_id, model_id): + """Reset all stability tracking data for a specific camera and model.""" + if camera_id in _camera_stability_tracking and model_id in _camera_stability_tracking[camera_id]: + stability_data = _camera_stability_tracking[camera_id][model_id] + + # Clear all tracking data + track_counters = stability_data["track_stability_counters"] + stable_tracks = stability_data["stable_tracks"] + session_state = stability_data["session_state"] + + old_counters = dict(track_counters) + old_stable = list(stable_tracks) + + track_counters.clear() + stable_tracks.clear() + + # IMPORTANT: Set flag to reset YOLO tracker on next detection run + # This will ensure track IDs start fresh (1, 2, 3...) instead of continuing from old IDs + session_state["reset_tracker_on_resume"] = True + + logger.info(f"🧹 Camera {camera_id}: CLEARED stability tracking - old_counters={old_counters}, old_stable={old_stable}") + logger.info(f"πŸ”„ Camera {camera_id}: YOLO tracker will be reset on next detection - fresh track IDs will start from 1") + else: + logger.debug(f"🧹 Camera {camera_id}: No stability tracking data to clear for model {model_id}") + +def update_single_track_stability(node, detection, camera_id, frame_shape=None, stability_threshold=4, context=None): + """Update track stability validation for a single highest confidence car.""" + model_id = node.get("modelId", "unknown") + + # Branch nodes should not do validation - only main pipeline should + is_branch_node = node.get("cropClass") is not None or node.get("parallel") is True + if is_branch_node: + logger.debug(f"⏭️ Camera {camera_id}: Skipping validation for branch node {model_id} - validation only done at main pipeline level") + return {"validation_complete": False, "branch_node": True, "stable_tracks": [], "current_tracks": []} + + # Check current mode - VALIDATION COUNTERS should increment in both validation_detecting and full_pipeline modes + current_mode = context.get("current_mode", "unknown") if context else "unknown" + is_validation_mode = (current_mode in ["validation_detecting", "full_pipeline"]) + + # Get camera-specific stability data + stability_data = get_camera_stability_data(camera_id, model_id) + track_counters = stability_data["track_stability_counters"] + stable_tracks = stability_data["stable_tracks"] + + current_track_id = detection.get("id") if detection else None + + # ═══ MODE-AWARE TRACK VALIDATION ═══ + logger.debug(f"πŸ“‹ Camera {camera_id}: === TRACK VALIDATION ANALYSIS ===") + logger.debug(f"πŸ“‹ Camera {camera_id}: Current mode: {current_mode} (validation_mode={is_validation_mode})") + logger.debug(f"πŸ“‹ Camera {camera_id}: Current track_id: {current_track_id} (assigned by YOLO tracking - not sequential)") + logger.debug(f"πŸ“‹ Camera {camera_id}: Existing counters: {dict(track_counters)}") + logger.debug(f"πŸ“‹ Camera {camera_id}: Stable tracks: {list(stable_tracks)}") + + # IMPORTANT: Only modify validation counters during validation_detecting mode + if not is_validation_mode: + logger.debug(f"🚫 Camera {camera_id}: NOT in validation mode - skipping counter modifications") + return { + "validation_complete": False, + "stable_tracks": list(stable_tracks), + "current_tracks": [current_track_id] if current_track_id is not None else [] + } + + if current_track_id is not None: + # Check if this is a different track than we were tracking + previous_track_ids = list(track_counters.keys()) + + # VALIDATION MODE: Reset counter if different track OR if track was previously stable + should_reset = ( + len(previous_track_ids) == 0 or # No previous tracking + current_track_id not in previous_track_ids or # Different track ID + current_track_id in stable_tracks # Track was stable - start fresh validation + ) + + logger.debug(f"πŸ“‹ Camera {camera_id}: Previous track_ids: {previous_track_ids}") + logger.debug(f"πŸ“‹ Camera {camera_id}: Track {current_track_id} was stable: {current_track_id in stable_tracks}") + logger.debug(f"πŸ“‹ Camera {camera_id}: Should reset counters: {should_reset}") + + if should_reset: + # Clear all previous tracking - fresh validation needed + if previous_track_ids: + for old_track_id in previous_track_ids: + old_count = track_counters.pop(old_track_id, 0) + stable_tracks.discard(old_track_id) 
+ logger.info(f"πŸ”„ Camera {camera_id}: VALIDATION RESET - track {old_track_id} counter from {old_count} to 0 (reason: {'stable_track_restart' if current_track_id == old_track_id else 'different_track'})") + + # Start fresh validation for this track + old_count = track_counters.get(current_track_id, 0) # Store old count for logging + track_counters[current_track_id] = 1 + current_count = 1 + logger.info(f"πŸ†• Camera {camera_id}: FRESH VALIDATION - Track {current_track_id} starting at 1/{stability_threshold}") + else: + # Continue validation for same track + old_count = track_counters.get(current_track_id, 0) + track_counters[current_track_id] = old_count + 1 + current_count = track_counters[current_track_id] + + logger.debug(f"πŸ”’ Camera {camera_id}: Track {current_track_id} counter: {old_count} β†’ {current_count}") + logger.info(f"πŸ” Camera {camera_id}: Track ID {current_track_id} validation {current_count}/{stability_threshold}") + + # Check if track has reached stability threshold + logger.debug(f"πŸ“Š Camera {camera_id}: Checking stability: {current_count} >= {stability_threshold}? {current_count >= stability_threshold}") + logger.debug(f"πŸ“Š Camera {camera_id}: Already stable: {current_track_id in stable_tracks}") + + if current_count >= stability_threshold and current_track_id not in stable_tracks: + stable_tracks.add(current_track_id) + logger.info(f"βœ… Camera {camera_id}: Track ID {current_track_id} STABLE after {current_count} consecutive frames") + logger.info(f"🎯 Camera {camera_id}: TRACK VALIDATION COMPLETE") + logger.debug(f"🎯 Camera {camera_id}: Stable tracks now: {list(stable_tracks)}") + return { + "validation_complete": True, + "send_none_detection": True, + "stable_tracks": [current_track_id], + "newly_stable_tracks": [current_track_id], + "current_tracks": [current_track_id] + } + elif current_count >= stability_threshold: + logger.debug(f"πŸ“Š Camera {camera_id}: Track {current_track_id} already stable - not re-adding") + else: + # No car detected - ALWAYS clear all tracking and reset counters + logger.debug(f"🚫 Camera {camera_id}: NO CAR DETECTED - clearing all tracking") + if track_counters or stable_tracks: + logger.debug(f"🚫 Camera {camera_id}: Existing state before reset: counters={dict(track_counters)}, stable={list(stable_tracks)}") + for track_id in list(track_counters.keys()): + old_count = track_counters.pop(track_id, 0) + logger.info(f"πŸ”„ Camera {camera_id}: No car detected - RESET track {track_id} counter from {old_count} to 0") + track_counters.clear() # Ensure complete reset + stable_tracks.clear() # Clear all stable tracks + logger.info(f"βœ… Camera {camera_id}: RESET TO VALIDATION PHASE - All counters and stable tracks cleared") + else: + logger.debug(f"🚫 Camera {camera_id}: No existing counters to clear") + logger.debug(f"Camera {camera_id}: VALIDATION - no car detected (all counters reset)") + + # Final return - validation not complete + result = { + "validation_complete": False, + "stable_tracks": list(stable_tracks), + "current_tracks": [current_track_id] if current_track_id is not None else [] + } + + logger.debug(f"πŸ“‹ Camera {camera_id}: Track stability result: {result}") + logger.debug(f"πŸ“‹ Camera {camera_id}: Final counters: {dict(track_counters)}") + logger.debug(f"πŸ“‹ Camera {camera_id}: Final stable tracks: {list(stable_tracks)}") + + return result + +# Keep the old function for backward compatibility but mark as deprecated +def update_track_stability_validation(node, detections, camera_id, frame_shape=None, 
stability_threshold=4): + """DEPRECATED: Use update_single_track_stability instead.""" + logger.warning(f"update_track_stability_validation called for camera {camera_id} - this function is deprecated, use update_single_track_stability instead") + if detections: + best_detection = max(detections, key=lambda x: x.get("confidence", 0)) + return update_single_track_stability(node, best_detection, camera_id, frame_shape, stability_threshold, None) + else: + return update_single_track_stability(node, None, camera_id, frame_shape, stability_threshold, None) + +def update_detection_stability(node, detections, camera_id, frame_shape=None): + """Legacy detection-based stability counter - DEPRECATED.""" + # This function is deprecated in favor of track-based validation only + logger.warning(f"update_detection_stability called for camera {camera_id} - this function is deprecated, use track-based validation instead") + return {"validation_complete": False, "valid_detections": 0, "deprecated": True} + +def update_track_stability(node, detections, camera_id, frame_shape=None): + """DEPRECATED: This function is obsolete and should not be used.""" + logger.warning(f"update_track_stability called for camera {camera_id} - this function is deprecated and obsolete") + return {"phase": "validation", "absence_counter": 0, "deprecated": True} + +def check_stable_tracks(camera_id, model_id, regions_dict): + """Check if any stable tracks match the detected classes for a specific camera.""" + # Get camera-specific stability data + stability_data = get_camera_stability_data(camera_id, model_id) + stable_tracks = stability_data["stable_tracks"] + + if not stable_tracks: + return False, [] + + # Check for track-based stability + stable_detections = [] + + for class_name, region_data in regions_dict.items(): + detection = region_data.get("detection", {}) + track_id = detection.get("id") + + if track_id is not None and track_id in stable_tracks: + stable_detections.append((class_name, track_id)) + logger.debug(f"Camera {camera_id}: Found stable detection: {class_name} with stable track ID {track_id}") + + has_stable_tracks = len(stable_detections) > 0 + return has_stable_tracks, stable_detections + +def reset_tracking_state(camera_id, model_id, reason="session ended"): + """Reset tracking state after session completion or timeout.""" + stability_data = get_camera_stability_data(camera_id, model_id) + session_state = stability_data["session_state"] + + # Clear all tracking data for fresh start + stability_data["track_stability_counters"].clear() + stability_data["stable_tracks"].clear() + session_state["active"] = True + session_state["waiting_for_backend_session"] = False + session_state["wait_start_time"] = 0.0 + session_state["reset_tracker_on_resume"] = True + + logger.info(f"Camera {camera_id}: πŸ”„ Reset tracking state - {reason}") + logger.info(f"Camera {camera_id}: 🧹 Cleared stability counters and stable tracks for fresh session") + +def is_camera_active(camera_id, model_id): + """Check if camera should be processing detections.""" + stability_data = get_camera_stability_data(camera_id, model_id) + session_state = stability_data["session_state"] + + # Check if waiting for backend sessionId has timed out + if session_state.get("waiting_for_backend_session", False): + current_time = time.time() + wait_start_time = session_state.get("wait_start_time", 0) + elapsed_time = current_time - wait_start_time + + if elapsed_time >= _session_timeout_seconds: + logger.warning(f"Camera {camera_id}: Backend sessionId timeout 
({_session_timeout_seconds}s) - resetting tracking") + reset_tracking_state(camera_id, model_id, "backend sessionId timeout") + return True + else: + remaining_time = _session_timeout_seconds - elapsed_time + logger.debug(f"Camera {camera_id}: Still waiting for backend sessionId - {remaining_time:.1f}s remaining") + return False + + return session_state.get("active", True) + +def cleanup_camera_stability(camera_id): + """Clean up stability tracking data when a camera is disconnected.""" + global _camera_stability_tracking + if camera_id in _camera_stability_tracking: + del _camera_stability_tracking[camera_id] + logger.info(f"Cleaned up stability tracking data for camera {camera_id}") + +def occupancy_detector(camera_id, model_id, enable=True): + """ + Temporary function to stop model inference after pipeline completion. + + Args: + camera_id (str): Camera identifier + model_id (str): Model identifier + enable (bool): True to enable occupancy mode (stop model after pipeline), False to disable + + When enabled: + - Model stops inference after completing full pipeline + - Backend sessionId handling continues in background + + Note: This is a temporary function that will be changed in the future. + """ + stability_data = get_camera_stability_data(camera_id, model_id) + session_state = stability_data["session_state"] + + if enable: + session_state["occupancy_mode"] = True + session_state["occupancy_enabled_at"] = time.time() + # Occupancy mode logging removed - not used in enhanced lightweight mode + else: + session_state["occupancy_mode"] = False + session_state.pop("occupancy_enabled_at", None) + # Occupancy mode logging removed - not used in enhanced lightweight mode + + return session_state.get("occupancy_mode", False) + def validate_pipeline_execution(node, regions_dict): """ Pre-validate that all required branches will execute successfully before @@ -573,7 +1194,144 @@ def validate_pipeline_execution(node, regions_dict): logger.info(f"Pipeline pre-validation PASSED: all required branches {list(required_branches)} will execute") return True, [] -def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): +def run_lightweight_detection_with_validation(frame, node: dict, min_confidence=0.7, min_bbox_area_ratio=0.3): + """ + Run lightweight detection with validation rules for session ID triggering. + Returns detection info only if it passes validation thresholds. 
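+
+    Usage sketch (hypothetical caller; assumes `node` is a loaded pipeline
+    node and `frame` a BGR image; `request_session_id` is a placeholder):
+
+        det = run_lightweight_detection_with_validation(frame, node,
+                                                        min_confidence=0.7,
+                                                        min_bbox_area_ratio=0.3)
+        if det.get("validation_passed"):
+            request_session_id(det["class"], det["confidence"], det["bbox"])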
+ """ + logger.debug(f"Running lightweight detection with validation: {node['modelId']} (conf>={min_confidence}, bbox_area>={min_bbox_area_ratio})") + + try: + # Run basic detection only - no branches, no actions + model = node["model"] + trigger_classes = node.get("triggerClasses", []) + trigger_class_indices = node.get("triggerClassIndices") + + # Run YOLO inference + res = model(frame, verbose=False) + + best_detection = None + frame_height, frame_width = frame.shape[:2] + frame_area = frame_height * frame_width + + for r in res: + boxes = r.boxes + if boxes is None or len(boxes) == 0: + continue + + for box in boxes: + # Extract detection info + xyxy = box.xyxy[0].cpu().numpy() + conf = box.conf[0].cpu().numpy() + cls_id = int(box.cls[0].cpu().numpy()) + class_name = model.names[cls_id] + + # Apply confidence threshold + if conf < min_confidence: + continue + + # Apply trigger class filtering if specified + if trigger_class_indices and cls_id not in trigger_class_indices: + continue + if trigger_classes and class_name not in trigger_classes: + continue + + # Calculate bbox area ratio + x1, y1, x2, y2 = xyxy + bbox_area = (x2 - x1) * (y2 - y1) + bbox_area_ratio = bbox_area / frame_area if frame_area > 0 else 0 + + # Apply bbox area threshold + if bbox_area_ratio < min_bbox_area_ratio: + logger.debug(f"Detection filtered out: bbox_area_ratio={bbox_area_ratio:.3f} < {min_bbox_area_ratio}") + continue + + # Validation passed + if not best_detection or conf > best_detection["confidence"]: + best_detection = { + "class": class_name, + "confidence": float(conf), + "bbox": [int(x) for x in xyxy], + "bbox_area_ratio": float(bbox_area_ratio), + "validation_passed": True + } + + if best_detection: + logger.debug(f"Validation PASSED: {best_detection['class']} (conf: {best_detection['confidence']:.3f}, area: {best_detection['bbox_area_ratio']:.3f})") + return best_detection + else: + logger.debug(f"Validation FAILED: No detection meets criteria (conf>={min_confidence}, area>={min_bbox_area_ratio})") + return {"validation_passed": False} + + except Exception as e: + logger.error(f"Error in lightweight detection with validation: {str(e)}", exc_info=True) + return {"validation_passed": False} + +def run_lightweight_detection(frame, node: dict): + """ + Run lightweight detection for car presence validation only. + Returns basic detection info without running branches or external actions. 
+ """ + logger.debug(f"Running lightweight detection: {node['modelId']}") + + try: + # Run basic detection only - no branches, no actions + model = node["model"] + min_confidence = node.get("minConfidence", 0.5) + trigger_classes = node.get("triggerClasses", []) + trigger_class_indices = node.get("triggerClassIndices") + + # Run YOLO inference + res = model(frame, verbose=False) + + car_detected = False + best_detection = None + + for r in res: + boxes = r.boxes + if boxes is None or len(boxes) == 0: + continue + + for box in boxes: + # Extract detection info + xyxy = box.xyxy[0].cpu().numpy() + conf = box.conf[0].cpu().numpy() + cls_id = int(box.cls[0].cpu().numpy()) + class_name = model.names[cls_id] + + # Apply confidence threshold + if conf < min_confidence: + continue + + # Apply trigger class filtering if specified + if trigger_class_indices and cls_id not in trigger_class_indices: + continue + if trigger_classes and class_name not in trigger_classes: + continue + + # Car detected + car_detected = True + if not best_detection or conf > best_detection["confidence"]: + best_detection = { + "class": class_name, + "confidence": float(conf), + "bbox": [int(x) for x in xyxy] + } + + logger.debug(f"Lightweight detection result: car_detected={car_detected}") + if best_detection: + logger.debug(f"Best detection: {best_detection['class']} (conf: {best_detection['confidence']:.3f})") + + return { + "car_detected": car_detected, + "best_detection": best_detection + } + + except Exception as e: + logger.error(f"Error in lightweight detection: {str(e)}", exc_info=True) + return {"car_detected": False, "best_detection": None} + +def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None, validated_detection=None): """ Enhanced pipeline that supports: - Multi-class detection (detecting multiple classes simultaneously) @@ -582,6 +1340,14 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): - Context passing for session/camera information """ try: + # Extract backend sessionId from context at the start of function + backend_session_id = context.get("backend_session_id") if context else None + camera_id = context.get("camera_id", "unknown") if context else "unknown" + model_id = node.get("modelId", "unknown") + + if backend_session_id: + logger.info(f"πŸ”‘ PIPELINE USING BACKEND SESSION_ID: {backend_session_id} for camera {camera_id}") + task = getattr(node["model"], "task", None) # ─── Classification stage ─────────────────────────────────── @@ -615,97 +1381,121 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): elif "color" in model_id: det["color"] = class_name - execute_actions(node, frame, det) + execute_actions(node, frame, det, context.get("regions_dict") if context else None) return (det, None) if return_bbox else det - # ─── Detection stage - Multi-class support ────────────────── - tk = node["triggerClassIndices"] - logger.debug(f"Running detection for node {node['modelId']} with trigger classes: {node.get('triggerClasses', [])} (indices: {tk})") - logger.debug(f"Node configuration: minConfidence={node['minConfidence']}, multiClass={node.get('multiClass', False)}") - - res = node["model"].track( - frame, - stream=False, - persist=True, - **({"classes": tk} if tk else {}) - )[0] + # ─── Occupancy mode check (stop future frames after pipeline completion) ─────────────────────────────────────── + # Old occupancy mode logic removed - now using two-phase detection system - # Collect all detections above confidence threshold - 
all_detections = [] - all_boxes = [] - regions_dict = {} - - logger.debug(f"Raw detection results from model: {len(res.boxes) if res.boxes is not None else 0} detections") - - for i, box in enumerate(res.boxes): - conf = float(box.cpu().conf[0]) - cid = int(box.cpu().cls[0]) - name = node["model"].names[cid] - - logger.debug(f"Detection {i}: class='{name}' (id={cid}), confidence={conf:.3f}, threshold={node['minConfidence']}") - - if conf < node["minConfidence"]: - logger.debug(f" -> REJECTED: confidence {conf:.3f} < threshold {node['minConfidence']}") - continue - - xy = box.cpu().xyxy[0] - x1, y1, x2, y2 = map(int, xy) - bbox = (x1, y1, x2, y2) - - detection = { - "class": name, - "confidence": conf, - "id": box.id.item() if hasattr(box, "id") else None, - "bbox": bbox + # ─── Session management check ─────────────────────────────────────── + if not is_camera_active(camera_id, model_id): + logger.debug(f"⏰ Camera {camera_id}: Waiting for backend sessionId, sending 'none' detection") + none_detection = { + "class": "none", + "confidence": 1.0, + "bbox": [0, 0, 0, 0], + "branch_results": {} } - - all_detections.append(detection) - all_boxes.append(bbox) - - logger.debug(f" -> ACCEPTED: {name} with confidence {conf:.3f}, bbox={bbox}") - - # Store highest confidence detection for each class - if name not in regions_dict or conf > regions_dict[name]["confidence"]: - regions_dict[name] = { - "bbox": bbox, - "confidence": conf, - "detection": detection + return (none_detection, [0, 0, 0, 0]) if return_bbox else none_detection + + # ─── Detection stage - Use validated detection if provided (full_pipeline mode) ─── + if validated_detection: + track_id = validated_detection.get('track_id') + logger.info(f"πŸ”„ PIPELINE: Using validated detection from validation phase - track_id={track_id}") + # Convert validated detection back to all_detections format for branch processing + all_detections = [validated_detection] + # Create regions_dict based on validated detection class with proper structure + class_name = validated_detection.get("class", "car") + regions_dict = { + class_name: { + "confidence": validated_detection.get("confidence"), + "bbox": validated_detection.get("bbox", [0, 0, 0, 0]), + "detection": validated_detection } - logger.debug(f" -> Updated regions_dict['{name}'] with confidence {conf:.3f}") - - logger.info(f"Detection summary: {len(all_detections)} accepted detections from {len(res.boxes) if res.boxes is not None else 0} total") - logger.info(f"Detected classes: {list(regions_dict.keys())}") - - if not all_detections: - logger.warning("No detections above confidence threshold - returning null") - return (None, None) if return_bbox else None - - # ─── Multi-class validation ───────────────────────────────── - if node.get("multiClass", False) and node.get("expectedClasses"): - expected_classes = node["expectedClasses"] - detected_classes = list(regions_dict.keys()) - - logger.info(f"Multi-class validation: expected={expected_classes}, detected={detected_classes}") - - # Check if at least one expected class is detected (flexible mode) - matching_classes = [cls for cls in expected_classes if cls in detected_classes] - missing_classes = [cls for cls in expected_classes if cls not in detected_classes] - - logger.debug(f"Matching classes: {matching_classes}, Missing classes: {missing_classes}") - - if not matching_classes: - # No expected classes found at all - logger.warning(f"PIPELINE REJECTED: No expected classes detected. 
Expected: {expected_classes}, Detected: {detected_classes}") - return (None, None) if return_bbox else None - - if missing_classes: - logger.info(f"Partial multi-class detection: {matching_classes} found, {missing_classes} missing") - else: - logger.info(f"Complete multi-class detection success: {detected_classes}") + } + # Bypass track validation completely - force pipeline execution + track_validation_result = { + "validation_complete": True, + "stable_tracks": ["cached"], # Use dummy stable track to force pipeline execution + "current_tracks": ["cached"], + "bypass_validation": True + } else: - logger.debug("No multi-class validation - proceeding with all detections") + # Normal detection stage - Using structured detection function + all_detections, regions_dict, track_validation_result = run_detection_with_tracking(frame, node, context) + + if not all_detections: + logger.debug("No detections from structured detection function - sending 'none' detection") + none_detection = { + "class": "none", + "confidence": 1.0, + "bbox": [0, 0, 0, 0], + "branch_results": {} + } + return (none_detection, [0, 0, 0, 0]) if return_bbox else none_detection - # ─── Pre-validate pipeline execution ──────────────────────── + # Extract bounding boxes for compatibility + all_boxes = [det["bbox"] for det in all_detections] + + # ─── Track-Based Validation System: Using Track ID Stability ──────────────────────── + tracking_config = node.get("tracking", {}) + stability_threshold = tracking_config.get("stabilityThreshold", node.get("stabilityThreshold", 1)) + + camera_id = context.get("camera_id", "unknown") if context else "unknown" + + if stability_threshold > 1 and tracking_config.get("enabled", True): + # Note: Old occupancy state system removed - app.py handles all mode transitions now + # Track validation is handled by update_single_track_stability function + model_id = node.get("modelId", "unknown") + + # Simplified: just check if we have stable tracks from track validation + current_phase = "validation" # Always validation phase in simplified system + absence_counter = 0 + max_absence_frames = 3 + + if current_phase == "validation": + # ═══ TRACK VALIDATION PHASE ═══ + # Check if this is a branch node - branches should execute regardless of main validation state + is_branch_node = node.get("cropClass") is not None or node.get("parallel") is True + + if is_branch_node: + # This is a branch node - allow normal execution regardless of main pipeline validation + logger.debug(f"πŸ” Camera {camera_id}: Branch node {model_id} executing during track validation phase") + else: + # Main pipeline node during track validation - check for stable tracks + stable_tracks = track_validation_result.get("stable_tracks", []) + + if not stable_tracks: + # No stable tracks yet - return detection without branches until track validation completes + if all_detections: + # Return the best detection but skip branches during validation + primary_detection = max(all_detections, key=lambda x: x["confidence"]) + logger.debug(f"πŸ” Camera {camera_id}: TRACK VALIDATION PHASE - returning detection without branches (stable_tracks: {len(stable_tracks)}, sessionId: {backend_session_id or 'none'})") + else: + # No detection - return none + primary_detection = {"class": "none", "confidence": 0.0, "bbox": [0, 0, 0, 0]} + logger.debug(f"πŸ” Camera {camera_id}: TRACK VALIDATION PHASE - no detection found (sessionId: {backend_session_id or 'none'})") + + primary_bbox = primary_detection.get("bbox", [0, 0, 0, 0]) + return (primary_detection, 
primary_bbox) if return_bbox else primary_detection + else: + # We have stable tracks - validation is complete, proceed with pipeline + logger.info(f"🎯 Camera {camera_id}: STABLE TRACKS DETECTED - proceeding with full pipeline (tracks: {stable_tracks})") + + # Note: Old waiting_for_session and occupancy phases removed + # app.py lightweight mode handles all state transitions now + + # ─── Pre-validate pipeline execution (only proceed if we have stable tracks for main pipeline) ──────────────────────── + is_branch_node = node.get("cropClass") is not None or node.get("parallel") is True + + if not is_branch_node and stability_threshold > 1 and tracking_config.get("enabled", True): + # Main pipeline node with tracking - check for stable tracks before proceeding + stable_tracks = track_validation_result.get("stable_tracks", []) + if not stable_tracks: + logger.debug(f"πŸ”’ Camera {camera_id}: Main pipeline requires stable tracks - none found, skipping pipeline execution") + none_detection = {"class": "none", "confidence": 1.0, "bbox": [0, 0, 0, 0], "awaiting_stable_tracks": True} + return (none_detection, [0, 0, 0, 0]) if return_bbox else none_detection + pipeline_valid, missing_branches = validate_pipeline_execution(node, regions_dict) if not pipeline_valid: @@ -720,42 +1510,51 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): **(context or {}) } - # ─── Create initial database record when Car+Frontal detected ──── - if node.get("db_manager") and node.get("multiClass", False): - # Only create database record if we have both Car and Frontal - has_car = "Car" in regions_dict - has_frontal = "Frontal" in regions_dict + # ─── Database operations will be handled when backend sessionId is received ──── + + if node.get("db_manager") and regions_dict: + detected_classes = list(regions_dict.keys()) + logger.debug(f"Valid detections found: {detected_classes}") - if has_car and has_frontal: - # Generate UUID session_id since client session is None for now - import uuid as uuid_lib + if backend_session_id: + # Backend sessionId is available, proceed with database operations from datetime import datetime - generated_session_id = str(uuid_lib.uuid4()) - - # Insert initial detection record display_id = detection_result.get("display_id", "unknown") timestamp = datetime.now().strftime("%Y-%m-%dT%H-%M-%S") inserted_session_id = node["db_manager"].insert_initial_detection( display_id=display_id, captured_timestamp=timestamp, - session_id=generated_session_id + session_id=backend_session_id ) if inserted_session_id: - # Update detection_result with the generated session_id for actions and branches detection_result["session_id"] = inserted_session_id - detection_result["timestamp"] = timestamp # Update with proper timestamp - logger.info(f"Created initial database record with session_id: {inserted_session_id}") + detection_result["timestamp"] = timestamp + logger.info(f"πŸ’Ύ DATABASE RECORD CREATED with backend session_id: {inserted_session_id}") + logger.debug(f"Database record: display_id={display_id}, timestamp={timestamp}") + else: + logger.error(f"Failed to create database record with backend session_id: {backend_session_id}") else: - logger.debug(f"Database record not created - missing required classes. Has Car: {has_car}, Has Frontal: {has_frontal}") + logger.info(f"πŸ“‘ Camera {camera_id}: Full pipeline completed, detection data will be sent to backend. 
Database operations will occur when sessionId is received.") + # Store detection info for later database operations when sessionId arrives + detection_result["awaiting_session_id"] = True + from datetime import datetime + detection_result["timestamp"] = datetime.now().strftime("%Y-%m-%dT%H-%M-%S") - execute_actions(node, frame, detection_result, regions_dict) + # Execute actions for root node only if it doesn't have branches + # Branch nodes with actions will execute them after branch processing + if not node.get("branches") or node.get("modelId") == "yolo11n": + execute_actions(node, frame, detection_result, regions_dict) - # ─── Parallel branch processing ───────────────────────────── + # ─── Branch processing (no stability check here) ───────────────────────────── if node["branches"]: branch_results = {} + # Extract camera_id for logging + camera_id = detection_result.get("camera_id", context.get("camera_id", "unknown") if context else "unknown") + + # Filter branches that should be triggered active_branches = [] for br in node["branches"]: @@ -786,21 +1585,37 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): futures = {} for br in active_branches: - crop_class = br.get("cropClass", br.get("triggerClasses", [])[0] if br.get("triggerClasses") else None) sub_frame = frame + crop_class = br.get("cropClass") - logger.info(f"Starting parallel branch: {br['modelId']}, crop_class: {crop_class}") + logger.info(f"Starting parallel branch: {br['modelId']}, cropClass: {crop_class}") if br.get("crop", False) and crop_class: - cropped = crop_region_by_class(frame, regions_dict, crop_class) - if cropped is not None: - sub_frame = cv2.resize(cropped, (224, 224)) - logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']}") + if crop_class in regions_dict: + cropped = crop_region_by_class(frame, regions_dict, crop_class) + if cropped is not None: + sub_frame = cropped # Use cropped image without manual resizing + logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']} - model will handle resizing") + else: + logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch") + continue else: - logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch") + logger.warning(f"Crop class {crop_class} not found in detected regions for {br['modelId']}, skipping branch") continue - future = executor.submit(run_pipeline, sub_frame, br, True, context) + # Add regions_dict and session_id to context for child branches + branch_context = dict(context) if context else {} + branch_context["regions_dict"] = regions_dict + + # Pass session_id from detection_result to branch context for Redis actions + if "session_id" in detection_result: + branch_context["session_id"] = detection_result["session_id"] + logger.debug(f"Added session_id to branch context: {detection_result['session_id']}") + elif backend_session_id: + branch_context["session_id"] = backend_session_id + logger.debug(f"Added backend_session_id to branch context: {backend_session_id}") + + future = executor.submit(run_pipeline, sub_frame, br, True, branch_context) futures[future] = br # Collect results @@ -811,30 +1626,58 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): if result: branch_results[br["modelId"]] = result logger.info(f"Branch {br['modelId']} completed: {result}") + + # Collect nested branch results if they exist + if "branch_results" in result: + for nested_id, nested_result in 
result["branch_results"].items(): + branch_results[nested_id] = nested_result + logger.info(f"Collected nested branch result: {nested_id} = {nested_result}") except Exception as e: logger.error(f"Branch {br['modelId']} failed: {e}") else: # Run branches sequentially for br in active_branches: - crop_class = br.get("cropClass", br.get("triggerClasses", [])[0] if br.get("triggerClasses") else None) sub_frame = frame + crop_class = br.get("cropClass") - logger.info(f"Starting sequential branch: {br['modelId']}, crop_class: {crop_class}") + logger.info(f"Starting sequential branch: {br['modelId']}, cropClass: {crop_class}") if br.get("crop", False) and crop_class: - cropped = crop_region_by_class(frame, regions_dict, crop_class) - if cropped is not None: - sub_frame = cv2.resize(cropped, (224, 224)) - logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']}") + if crop_class in regions_dict: + cropped = crop_region_by_class(frame, regions_dict, crop_class) + if cropped is not None: + sub_frame = cropped # Use cropped image without manual resizing + logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']} - model will handle resizing") + else: + logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch") + continue else: - logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch") + logger.warning(f"Crop class {crop_class} not found in detected regions for {br['modelId']}, skipping branch") continue try: - result, _ = run_pipeline(sub_frame, br, True, context) + # Add regions_dict and session_id to context for child branches + branch_context = dict(context) if context else {} + branch_context["regions_dict"] = regions_dict + + # Pass session_id from detection_result to branch context for Redis actions + if "session_id" in detection_result: + branch_context["session_id"] = detection_result["session_id"] + logger.debug(f"Added session_id to sequential branch context: {detection_result['session_id']}") + elif backend_session_id: + branch_context["session_id"] = backend_session_id + logger.debug(f"Added backend_session_id to sequential branch context: {backend_session_id}") + + result, _ = run_pipeline(sub_frame, br, True, branch_context) if result: branch_results[br["modelId"]] = result logger.info(f"Branch {br['modelId']} completed: {result}") + + # Collect nested branch results if they exist + if "branch_results" in result: + for nested_id, nested_result in result["branch_results"].items(): + branch_results[nested_id] = nested_result + logger.info(f"Collected nested branch result: {nested_id} = {nested_result}") else: logger.warning(f"Branch {br['modelId']} returned no result") except Exception as e: @@ -848,6 +1691,26 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): # ─── Execute Parallel Actions ─────────────────────────────── if node.get("parallelActions") and "branch_results" in detection_result: execute_parallel_actions(node, frame, detection_result, regions_dict) + + # ─── Auto-enable occupancy mode after successful pipeline completion ───────────────── + camera_id = context.get("camera_id", "unknown") if context else "unknown" + model_id = node.get("modelId", "unknown") + + # Enable occupancy detector automatically after first successful pipeline + # Auto-enabling occupancy logging removed - not used in enhanced lightweight mode + occupancy_detector(camera_id, model_id, enable=True) + + logger.info(f"βœ… Camera {camera_id}: Pipeline completed, detection data 
will be sent to backend") + logger.info(f"πŸ›‘ Camera {camera_id}: Model will stop inference for future frames") + logger.info(f"πŸ“‘ Backend sessionId will be handled when received via WebSocket") + + # ─── Execute actions after successful detection AND branch processing ────────── + # This ensures detection nodes (like frontal_detection_v1) execute their actions + # after completing both detection and branch processing + if node.get("actions") and regions_dict and node.get("modelId") != "yolo11n": + # Execute actions for branch detection nodes, skip root to avoid duplication + logger.debug(f"Executing post-detection actions for branch node {node.get('modelId')}") + execute_actions(node, frame, detection_result, regions_dict) # ─── Return detection result ──────────────────────────────── primary_detection = max(all_detections, key=lambda x: x["confidence"]) @@ -863,5 +1726,6 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): except Exception as e: logger.error(f"Error in node {node.get('modelId')}: {e}") + import traceback traceback.print_exc() return (None, None) if return_bbox else None diff --git a/test/sample.png b/test/sample.png new file mode 100644 index 0000000..568e38f Binary files /dev/null and b/test/sample.png differ diff --git a/test/sample2.png b/test/sample2.png new file mode 100644 index 0000000..c1e8485 Binary files /dev/null and b/test/sample2.png differ diff --git a/test/test.py b/test/test.py new file mode 100644 index 0000000..ff073c4 --- /dev/null +++ b/test/test.py @@ -0,0 +1,60 @@ +from ultralytics import YOLO +import cv2 +import os + +# Load the model +# model = YOLO('../models/webcam-local-01/4/bangchak_poc/yolo11n.pt') +model = YOLO('yolo11m.pt') + +def test_image(image_path): + """Test a single image with YOLO model""" + if not os.path.exists(image_path): + print(f"Image not found: {image_path}") + return + + # Run inference - filter for car class only (class 2 in COCO) + results = model(image_path, classes=[2, 5, 7]) # 2, 5, 7 = car, bus, truck in COCO dataset + + # Display results + for r in results: + im_array = r.plot() # plot a BGR numpy array of predictions + + # Resize image for display (max width/height 800px) + height, width = im_array.shape[:2] + max_dimension = 800 + if width > max_dimension or height > max_dimension: + if width > height: + new_width = max_dimension + new_height = int(height * (max_dimension / width)) + else: + new_height = max_dimension + new_width = int(width * (max_dimension / height)) + im_array = cv2.resize(im_array, (new_width, new_height)) + + # Show image with predictions + cv2.imshow('YOLO Test - Car Detection Only', im_array) + cv2.waitKey(0) + cv2.destroyAllWindows() + + # Print detection info + print(f"\nDetections for {image_path}:") + if r.boxes is not None and len(r.boxes) > 0: + for i, box in enumerate(r.boxes): + cls = int(box.cls[0]) + conf = float(box.conf[0]) + original_class = model.names[cls] # Original class name (car/bus/truck) + # Get bounding box coordinates + x1, y1, x2, y2 = box.xyxy[0].tolist() + # Rename all vehicle types to "car" + print(f"Detection {i+1}: car (was: {original_class}) - Confidence: {conf:.3f} - BBox: ({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f})") + print(f"Total cars detected: {len(r.boxes)}") + else: + print("No cars detected in the image") + +if __name__ == "__main__": + # Test with an image file + image_path = input("Enter image path (or press Enter for default test): ") + if not image_path: + image_path = "sample.png" # Default test image + + 
test_image(image_path) \ No newline at end of file diff --git a/test/test_botsort_zone_track.py b/test/test_botsort_zone_track.py new file mode 100644 index 0000000..bbbd188 --- /dev/null +++ b/test/test_botsort_zone_track.py @@ -0,0 +1,352 @@ +import cv2 +import torch +import numpy as np +import time +from collections import defaultdict +from ultralytics import YOLO + +def point_in_polygon(point, polygon): + """Check if a point is inside a polygon using ray casting algorithm""" + x, y = point + n = len(polygon) + inside = False + + p1x, p1y = polygon[0] + for i in range(1, n + 1): + p2x, p2y = polygon[i % n] + if y > min(p1y, p2y): + if y <= max(p1y, p2y): + if x <= max(p1x, p2x): + if p1y != p2y: + xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x + if p1x == p2x or x <= xinters: + inside = not inside + p1x, p1y = p2x, p2y + + return inside + +def draw_zone(frame, zone_polygon, color=(255, 0, 0), thickness=3): + """Draw tracking zone on frame""" + pts = np.array(zone_polygon, np.int32) + pts = pts.reshape((-1, 1, 2)) + cv2.polylines(frame, [pts], True, color, thickness) + + # Add semi-transparent fill + overlay = frame.copy() + cv2.fillPoly(overlay, [pts], color) + cv2.addWeighted(overlay, 0.2, frame, 0.8, 0, frame) + +def setup_video_writer(output_path, fps, width, height): + """Setup video writer for output""" + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + return cv2.VideoWriter(output_path, fourcc, fps, (width, height)) + +def write_frame_to_video(video_writer, frame, repeat_count): + """Write frame to video with specified repeat count""" + for _ in range(repeat_count): + video_writer.write(frame) + +def finalize_video(video_writer): + """Release video writer""" + video_writer.release() + +def main(): + video_path = "sample2.mp4" + yolo_model = "bangchakv2/yolov8n.pt" + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + print(f"Using device: {device}") + + print("Loading YOLO model...") + model = YOLO(yolo_model) + + print("Opening video...") + cap = cv2.VideoCapture(video_path) + fps = int(cap.get(cv2.CAP_PROP_FPS)) + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + print(f"Video info: {width}x{height}, {fps} FPS, {total_frames} frames") + + # Define tracking zone - Gas station floor area (trapezoidal shape) + # Based on the perspective of the gas station floor from your image + # width 2560, height 1440 + + tracking_zone = [ + (423, 974), # Point 1 + (1540, 1407), # Point 2 + (1976, 806), # Point 3 + (1364, 749) # Point 4 + ] + + print(f"🎯 Tracking zone defined: {tracking_zone}") + + # CONTINUOUS TRACKING: Process every 118 frames (~2.0s intervals) + frame_skip = 118 + + print(f"🎯 CONTINUOUS MODE: Processing every {frame_skip} frames ({frame_skip/fps:.2f}s intervals)") + print(f"🎬 Output video will have same duration as input (each processed frame shown for 2 seconds)") + print("πŸ”₯ ZONE-FIRST TRACKING: Only cars entering the zone will be tracked!") + print("Requires 5 consecutive detections IN ZONE for verification") + print("πŸ• 24/7 MODE: Memory reset every hour to prevent overflow") + print("Press 'q' to quit") + + # Setup video writer for output (same fps as input for normal playback speed) + output_path = "tracking_output_botsort_zone_track.mp4" + output_fps = fps # Use same fps as input video + out = setup_video_writer(output_path, output_fps, width, height) + + # Track car IDs and their consecutive detections + car_id_counts = 
defaultdict(int) + successful_cars = set() + last_positions = {} + processed_count = 0 + + # ID remapping for clean sequential zone IDs + tracker_to_zone_id = {} # Maps tracker IDs to clean zone IDs + next_zone_id = 1 # Next clean zone ID to assign + + # Store previous frame detections to filter tracking inputs + previous_zone_cars = set() + + # 24/7 operation: Reset every hour (1800 snapshots at 2-sec intervals = 1 hour) + RESET_INTERVAL = 1800 # Reset every 1800 processed frames (1 hour) + + frame_idx = 0 + + while True: + # Skip frames to maintain interval + for _ in range(frame_skip): + ret, frame = cap.read() + if not ret: + print("\nNo more frames to read") + cap.release() + cv2.destroyAllWindows() + return + frame_idx += 1 + + processed_count += 1 + current_time = frame_idx / fps + + print(f"\n🎬 Frame {frame_idx} at {current_time:.2f}s (processed #{processed_count})") + + # 24/7 Memory Management: Reset every hour + if processed_count % RESET_INTERVAL == 0: + print(f"πŸ• HOURLY RESET: Clearing all tracking data (processed {processed_count} frames)") + print(f" πŸ“Š Before reset: {len(tracker_to_zone_id)} tracked cars, next Zone ID was {next_zone_id}") + + # Clear all tracking data + tracker_to_zone_id.clear() + car_id_counts.clear() + successful_cars.clear() + last_positions.clear() + next_zone_id = 1 # Reset to 1 + + # Reset BoT-SORT tracker state + try: + model.reset() + print(f" βœ… BoT-SORT tracker reset successfully") + except: + print(f" ⚠️ BoT-SORT reset not available (continuing without reset)") + + print(f" πŸ†• Zone IDs will start from 1 again") + + # Draw tracking zone on frame + draw_zone(frame, tracking_zone, color=(0, 255, 255), thickness=3) # Yellow zone + + # First run YOLO detection (without tracking) to find cars in zone + detection_results = model(frame, verbose=False, conf=0.7, classes=[2]) + + # Find cars currently in the tracking zone + current_zone_cars = [] + total_detections = 0 + + if detection_results[0].boxes is not None: + boxes = detection_results[0].boxes.xyxy.cpu() + scores = detection_results[0].boxes.conf.cpu() + + total_detections = len(boxes) + print(f" πŸ” Total car detections: {total_detections}") + + for i in range(len(boxes)): + x1, y1, x2, y2 = boxes[i] + conf = float(scores[i]) + + # Check if detection is in zone (using bottom center) + box_bottom = ((x1 + x2) / 2, y2) + if point_in_polygon(box_bottom, tracking_zone): + current_zone_cars.append({ + 'bbox': [float(x1), float(y1), float(x2), float(y2)], + 'conf': conf, + 'center': ((x1 + x2) / 2, (y1 + y2) / 2), + 'bottom': box_bottom + }) + + print(f" 🎯 Cars in zone: {len(current_zone_cars)}") + + # Only run tracking if there are cars in the zone + detected_car_ids = set() + + if current_zone_cars: + # Run tracking on the full frame (let tracker handle associations) + # But we'll filter results to only zone cars afterward + results = model.track( + frame, + persist=True, + verbose=False, + conf=0.7, + classes=[2], + tracker="botsort_reid.yaml" + ) + + if results[0].boxes is not None and results[0].boxes.id is not None: + boxes = results[0].boxes.xyxy.cpu() + scores = results[0].boxes.conf.cpu() + track_ids = results[0].boxes.id.cpu().int() + + print(f" πŸ“Š Total tracked objects: {len(track_ids)}") + + # Filter tracked objects to only those in zone + zone_tracks = [] + for i, track_id in enumerate(track_ids): + x1, y1, x2, y2 = boxes[i] + conf = float(scores[i]) + + # Check if this tracked object is in our zone + box_bottom = ((x1 + x2) / 2, y2) + if point_in_polygon(box_bottom, 
tracking_zone): + zone_tracks.append({ + 'id': int(track_id), + 'bbox': [int(x1), int(y1), int(x2), int(y2)], + 'conf': conf, + 'center': ((x1 + x2) / 2, (y1 + y2) / 2), + 'bottom': box_bottom + }) + + print(f" βœ… Zone tracks: {len(zone_tracks)}") + + # Process each zone track + for track in zone_tracks: + tracker_id = track['id'] # Original tracker ID + x1, y1, x2, y2 = track['bbox'] + conf = track['conf'] + box_center = track['center'] + + # Map tracker ID to clean zone ID + if tracker_id not in tracker_to_zone_id: + tracker_to_zone_id[tracker_id] = next_zone_id + print(f" πŸ†• New car: Tracker ID {tracker_id} β†’ Zone ID {next_zone_id}") + next_zone_id += 1 + + zone_id = tracker_to_zone_id[tracker_id] # Clean sequential ID + + # Validate track continuity (use tracker_id for internal logic) + is_valid = True + + # Check for suspicious jumps + if tracker_id in last_positions: + last_center = last_positions[tracker_id] + distance = np.sqrt((box_center[0] - last_center[0])**2 + + (box_center[1] - last_center[1])**2) + + if distance > 400: # pixels in ~2.0s + is_valid = False + print(f" ⚠️ Zone ID {zone_id} (Tracker {tracker_id}): suspicious jump {distance:.0f}px") + + # Skip already successful cars (use zone_id for user logic) + if zone_id in successful_cars: + is_valid = False + print(f" βœ… Zone ID {zone_id}: already successful, skipping") + + # Only process valid, high-confidence zone tracks + if is_valid and conf > 0.7: + detected_car_ids.add(zone_id) # Use zone_id for display + car_id_counts[zone_id] += 1 + last_positions[tracker_id] = box_center # Track by tracker_id internally + + # Draw tracking results with clean zone ID + zone_color = (0, 255, 0) # Green for zone cars + cv2.rectangle(frame, (x1, y1), (x2, y2), zone_color, 2) + cv2.putText(frame, f'ZONE ID:{zone_id}', + (x1, y1-30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, zone_color, 2) + cv2.putText(frame, f'#{car_id_counts[zone_id]} {conf:.2f}', + (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, zone_color, 2) + + # Draw center point + cv2.circle(frame, (int(track['bottom'][0]), int(track['bottom'][1])), 5, zone_color, -1) + + print(f" βœ… Zone ID {zone_id} (Tracker {tracker_id}): ZONE detection #{car_id_counts[zone_id]} (conf: {conf:.2f})") + + # Check for success (5 consecutive detections IN ZONE) + if car_id_counts[zone_id] == 5: + print(f"πŸ† SUCCESS: Zone ID {zone_id} achieved 5 continuous ZONE detections - TRIGGER NEXT MODEL!") + successful_cars.add(zone_id) + + # Add success indicator to frame + cv2.putText(frame, f"SUCCESS: Zone Car {zone_id}!", + (50, height-50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 3) + else: + print(" πŸ“‹ No cars in zone - no tracking performed") + + # Draw any cars outside the zone in red (for reference) + if detection_results[0].boxes is not None: + boxes = detection_results[0].boxes.xyxy.cpu() + scores = detection_results[0].boxes.conf.cpu() + + for i in range(len(boxes)): + x1, y1, x2, y2 = boxes[i] + conf = float(scores[i]) + + box_bottom = ((x1 + x2) / 2, y2) + if not point_in_polygon(box_bottom, tracking_zone): + # Draw cars outside zone in red (not tracked) + x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) + cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 1) + cv2.putText(frame, f'OUT {conf:.2f}', + (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) + + # Display results + if detected_car_ids: + print(f" πŸ“‹ Active Zone IDs: {sorted(detected_car_ids)} (Clean sequential IDs)") + + # Show ID mapping for debugging + if tracker_to_zone_id: + mapping_str = ", 
".join([f"Tracker{k}β†’Zone{v}" for k, v in tracker_to_zone_id.items()]) + print(f" πŸ”„ ID Mapping: {mapping_str}") + + # Add annotations to frame + cv2.putText(frame, f"BoT-SORT Zone-First Tracking | Frame: {frame_idx} | {current_time:.2f}s", + (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2) + cv2.putText(frame, f"Zone Cars: {len(current_zone_cars)} | Active Tracks: {len(detected_car_ids)}", + (10, 65), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2) + cv2.putText(frame, f"Successful Cars: {len(successful_cars)}", + (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) + cv2.putText(frame, "TRACKING ZONE", + (tracking_zone[0][0], tracking_zone[0][1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2) + + # Write annotated frame to output video (repeat for 2 seconds duration) + write_frame_to_video(out, frame, frame_skip) + + # Show video with zone tracking info + display_frame = cv2.resize(frame, (960, 540)) + cv2.imshow('BoT-SORT Zone-First Tracking', display_frame) + + # Quick check for quit + key = cv2.waitKey(1) & 0xFF + if key == ord('q'): + break + + # Small delay to see results + time.sleep(0.1) + + cap.release() + finalize_video(out) + cv2.destroyAllWindows() + print(f"\n🎯 BoT-SORT zone-first tracking completed!") + print(f"πŸ“Š Processed {processed_count} frames with {frame_skip/fps:.2f}s intervals") + print(f"πŸ† Successfully tracked {len(successful_cars)} unique cars IN ZONE") + print(f"πŸ’Ύ Annotated video saved to: {output_path}") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/view_redis_images.py b/view_redis_images.py new file mode 100644 index 0000000..b1b3c63 --- /dev/null +++ b/view_redis_images.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +""" +Script to view frontal images saved in Redis +""" +import redis +import cv2 +import numpy as np +import sys +from datetime import datetime + +# Redis connection config (from pipeline.json) +REDIS_CONFIG = { + "host": "10.100.1.3", + "port": 6379, + "password": "FBQgi0i5RevAAMO5Hh66", + "db": 0 +} + +def connect_redis(): + """Connect to Redis server.""" + try: + client = redis.Redis( + host=REDIS_CONFIG["host"], + port=REDIS_CONFIG["port"], + password=REDIS_CONFIG["password"], + db=REDIS_CONFIG["db"], + decode_responses=False # Keep bytes for images + ) + client.ping() + print(f"βœ… Connected to Redis at {REDIS_CONFIG['host']}:{REDIS_CONFIG['port']}") + return client + except redis.exceptions.ConnectionError as e: + print(f"❌ Failed to connect to Redis: {e}") + return None + +def list_image_keys(client): + """List all image keys in Redis.""" + try: + # Look for keys matching the inference pattern + keys = client.keys("inference:*") + print(f"\nπŸ“‹ Found {len(keys)} image keys:") + for i, key in enumerate(keys): + key_str = key.decode() if isinstance(key, bytes) else key + print(f"{i+1}. 
{key_str}") + return keys + except Exception as e: + print(f"❌ Error listing keys: {e}") + return [] + +def view_image(client, key): + """View a specific image from Redis.""" + try: + # Get image data from Redis + image_data = client.get(key) + if image_data is None: + print(f"❌ No data found for key: {key}") + return + + print(f"πŸ“Έ Image size: {len(image_data)} bytes") + + # Convert bytes to numpy array + nparr = np.frombuffer(image_data, np.uint8) + + # Decode image + img = cv2.imdecode(nparr, cv2.IMREAD_COLOR) + if img is None: + print("❌ Failed to decode image data") + return + + print(f"πŸ–ΌοΈ Image dimensions: {img.shape[1]}x{img.shape[0]} pixels") + + # Display image + key_str = key.decode() if isinstance(key, bytes) else key + cv2.imshow(f'Redis Image: {key_str}', img) + print("πŸ‘οΈ Image displayed. Press any key to close...") + cv2.waitKey(0) + cv2.destroyAllWindows() + + # Ask if user wants to save the image + save = input("πŸ’Ύ Save image to file? (y/n): ").lower().strip() + if save == 'y': + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + filename = f"redis_image_{timestamp}.jpg" + cv2.imwrite(filename, img) + print(f"πŸ’Ύ Image saved as: {filename}") + + except Exception as e: + print(f"❌ Error viewing image: {e}") + +def monitor_new_images(client): + """Monitor for new images being added to Redis.""" + print("πŸ‘€ Monitoring for new images... (Press Ctrl+C to stop)") + try: + # Subscribe to Redis pub/sub for car detections + pubsub = client.pubsub() + pubsub.subscribe('car_detections') + + for message in pubsub.listen(): + if message['type'] == 'message': + data = message['data'].decode() + print(f"🚨 New detection: {data}") + + # Try to extract image key from message + import json + try: + detection_data = json.loads(data) + image_key = detection_data.get('image_key') + if image_key: + print(f"πŸ–ΌοΈ New image available: {image_key}") + view_choice = input("View this image now? (y/n): ").lower().strip() + if view_choice == 'y': + view_image(client, image_key) + except json.JSONDecodeError: + pass + + except KeyboardInterrupt: + print("\nπŸ‘‹ Stopping monitor...") + except Exception as e: + print(f"❌ Monitor error: {e}") + +def main(): + """Main function.""" + print("πŸ” Redis Image Viewer") + print("=" * 50) + + # Connect to Redis + client = connect_redis() + if not client: + return + + while True: + print("\nπŸ“‹ Options:") + print("1. List all image keys") + print("2. View specific image") + print("3. Monitor for new images") + print("4. 
Exit") + + choice = input("\nEnter choice (1-4): ").strip() + + if choice == '1': + keys = list_image_keys(client) + elif choice == '2': + keys = list_image_keys(client) + if keys: + try: + idx = int(input(f"\nEnter image number (1-{len(keys)}): ")) - 1 + if 0 <= idx < len(keys): + view_image(client, keys[idx]) + else: + print("❌ Invalid selection") + except ValueError: + print("❌ Please enter a valid number") + elif choice == '3': + monitor_new_images(client) + elif choice == '4': + print("πŸ‘‹ Goodbye!") + break + else: + print("❌ Invalid choice") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/webcam_rtsp_server.py b/webcam_rtsp_server.py new file mode 100644 index 0000000..65698ac --- /dev/null +++ b/webcam_rtsp_server.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python3 +""" +Enhanced webcam server that provides both RTSP streaming and HTTP snapshot endpoints +Compatible with CMS UI requirements for camera configuration +""" + +import cv2 +import threading +import time +import logging +import socket +from http.server import BaseHTTPRequestHandler, HTTPServer +import subprocess +import sys +import os + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s" +) +logger = logging.getLogger("webcam_rtsp_server") + +# Global webcam capture object +webcam_cap = None +rtsp_process = None + +class WebcamHTTPHandler(BaseHTTPRequestHandler): + """HTTP handler for snapshot requests""" + + def do_GET(self): + if self.path == '/snapshot' or self.path == '/snapshot.jpg': + try: + # Capture fresh frame from webcam for each request + ret, frame = webcam_cap.read() + if ret and frame is not None: + # Encode as JPEG + success, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 85]) + if success: + self.send_response(200) + self.send_header('Content-Type', 'image/jpeg') + self.send_header('Content-Length', str(len(buffer))) + self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate') + self.send_header('Pragma', 'no-cache') + self.send_header('Expires', '0') + self.end_headers() + self.wfile.write(buffer.tobytes()) + logger.debug(f"Served webcam snapshot, size: {len(buffer)} bytes") + return + else: + logger.error("Failed to encode frame as JPEG") + else: + logger.error("Failed to capture frame from webcam") + + # Send error response + self.send_response(500) + self.send_header('Content-Type', 'text/plain') + self.end_headers() + self.wfile.write(b'Failed to capture webcam frame') + + except Exception as e: + logger.error(f"Error serving snapshot: {e}") + self.send_response(500) + self.send_header('Content-Type', 'text/plain') + self.end_headers() + self.wfile.write(f'Error: {str(e)}'.encode()) + + elif self.path == '/status': + # Status endpoint for health checking + self.send_response(200) + self.send_header('Content-Type', 'application/json') + self.end_headers() + + width = int(webcam_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(webcam_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = webcam_cap.get(cv2.CAP_PROP_FPS) + + status = f'{{"status": "online", "width": {width}, "height": {height}, "fps": {fps}}}' + self.wfile.write(status.encode()) + + else: + # 404 for other paths + self.send_response(404) + self.send_header('Content-Type', 'text/plain') + self.end_headers() + self.wfile.write(b'Not Found - Available endpoints: /snapshot, /snapshot.jpg, /status') + + def log_message(self, format, *args): + # Suppress default HTTP server logging to avoid spam + pass + +def check_ffmpeg(): 
+ """Check if FFmpeg is available for RTSP streaming""" + try: + result = subprocess.run(['ffmpeg', '-version'], + capture_output=True, text=True, timeout=5) + if result.returncode == 0: + logger.info("FFmpeg found and working") + return True + except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError): + pass + + logger.warning("FFmpeg not found. RTSP streaming will not be available.") + logger.info("To enable RTSP streaming, install FFmpeg:") + logger.info(" Windows: Download from https://ffmpeg.org/download.html") + logger.info(" Linux: sudo apt install ffmpeg") + logger.info(" macOS: brew install ffmpeg") + return False + +def get_windows_camera_name(): + """Get the actual camera device name on Windows""" + try: + # List video devices using FFmpeg with proper encoding handling + result = subprocess.run(['ffmpeg', '-f', 'dshow', '-list_devices', 'true', '-i', 'dummy'], + capture_output=True, text=True, timeout=10, encoding='utf-8', errors='ignore') + output = result.stderr # FFmpeg outputs device list to stderr + + # Look for video devices in the output + lines = output.split('\n') + video_devices = [] + + # Parse the output - look for lines with (video) that contain device names in quotes + for line in lines: + if '[dshow @' in line and '(video)' in line and '"' in line: + # Extract device name between first pair of quotes + start = line.find('"') + 1 + end = line.find('"', start) + if start > 0 and end > start: + device_name = line[start:end] + video_devices.append(device_name) + + logger.info(f"Found Windows video devices: {video_devices}") + if video_devices: + # Force use the first device (index 0) which is the Logitech HD webcam + return video_devices[0] # This will be "η½—ζŠ€ι«˜ζΈ…η½‘η»œζ‘„εƒζœΊ C930c" + else: + logger.info("No devices found via FFmpeg detection, using fallback") + # Fall through to fallback names + + except Exception as e: + logger.debug(f"Failed to get Windows camera name: {e}") + + # Try common camera device names as fallback + # Prioritize Integrated Camera since that's what's working now + common_names = [ + "Integrated Camera", # This is working for the current setup + "USB Video Device", # Common name for USB cameras + "USB2.0 Camera", + "C930c", # Direct model name + "HD Pro Webcam C930c", # Full Logitech name + "Logitech", # Brand name + "USB Camera", + "Webcam" + ] + logger.info(f"Using fallback camera names: {common_names}") + return common_names[0] # Return "Integrated Camera" first + +def start_rtsp_stream(webcam_index=0, rtsp_port=8554): + """Start RTSP streaming using FFmpeg""" + global rtsp_process + + if not check_ffmpeg(): + return None + + try: + # Get the actual camera device name for Windows + if sys.platform.startswith('win'): + camera_name = get_windows_camera_name() + logger.info(f"Using Windows camera device: {camera_name}") + + # FFmpeg command to stream webcam via RTSP + if sys.platform.startswith('win'): + cmd = [ + 'ffmpeg', + '-f', 'dshow', + '-i', f'video={camera_name}', # Use detected camera name + '-c:v', 'libx264', + '-preset', 'veryfast', + '-tune', 'zerolatency', + '-r', '30', + '-s', '1280x720', + '-f', 'rtsp', + f'rtsp://localhost:{rtsp_port}/stream' + ] + elif sys.platform.startswith('linux'): + cmd = [ + 'ffmpeg', + '-f', 'v4l2', + '-i', f'/dev/video{webcam_index}', + '-c:v', 'libx264', + '-preset', 'veryfast', + '-tune', 'zerolatency', + '-r', '30', + '-s', '1280x720', + '-f', 'rtsp', + f'rtsp://localhost:{rtsp_port}/stream' + ] + else: # macOS + cmd = [ + 'ffmpeg', + '-f', 'avfoundation', + '-i', 
+                f'{webcam_index}:',
+                '-c:v', 'libx264',
+                '-preset', 'veryfast',
+                '-tune', 'zerolatency',
+                '-r', '30',
+                '-s', '1280x720',
+                '-f', 'rtsp',
+                f'rtsp://localhost:{rtsp_port}/stream'
+            ]
+
+        logger.info(f"Starting RTSP stream on rtsp://localhost:{rtsp_port}/stream")
+        logger.info(f"FFmpeg command: {' '.join(cmd)}")
+
+        rtsp_process = subprocess.Popen(
+            cmd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True
+        )
+
+        # Give FFmpeg a moment to start
+        time.sleep(2)
+
+        # Check if process is still running
+        if rtsp_process.poll() is None:
+            logger.info("RTSP streaming started successfully")
+            return rtsp_process
+        else:
+            # Get error output if process failed
+            stdout, stderr = rtsp_process.communicate(timeout=2)
+            logger.error("RTSP streaming failed to start")
+            logger.error(f"FFmpeg stdout: {stdout}")
+            logger.error(f"FFmpeg stderr: {stderr}")
+            return None
+
+    except Exception as e:
+        logger.error(f"Failed to start RTSP stream: {e}")
+        return None
+
+def get_local_ip():
+    """Get the Wireguard IP address for external access"""
+    # Use Wireguard IP for external access
+    return "10.101.1.4"
+
+def main():
+    global webcam_cap, rtsp_process
+
+    # Configuration - Force use index 0 for Logitech HD webcam
+    webcam_index = 0  # Logitech HD webcam C930c (1920x1080@30fps)
+    http_port = 8080
+    rtsp_port = 8554
+
+    logger.info("=== Webcam RTSP & HTTP Server ===")
+
+    # Initialize webcam
+    logger.info("Initializing webcam...")
+    webcam_cap = cv2.VideoCapture(webcam_index)
+
+    if not webcam_cap.isOpened():
+        logger.error(f"Failed to open webcam at index {webcam_index}")
+        logger.info("Try different webcam indices (0, 1, 2, etc.)")
+        return
+
+    # Set webcam properties - Use high resolution for Logitech HD webcam
+    webcam_cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
+    webcam_cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
+    webcam_cap.set(cv2.CAP_PROP_FPS, 30)
+
+    width = int(webcam_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(webcam_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    fps = webcam_cap.get(cv2.CAP_PROP_FPS)
+
+    logger.info(f"Webcam initialized: {width}x{height} @ {fps}fps")
+
+    # Get local IP for CMS configuration
+    local_ip = get_local_ip()
+
+    # Start RTSP streaming (optional, requires FFmpeg)
+    rtsp_process = start_rtsp_stream(webcam_index, rtsp_port)
+
+    # Start HTTP server for snapshots
+    server_address = ('0.0.0.0', http_port)  # Bind to all interfaces
+    http_server = HTTPServer(server_address, WebcamHTTPHandler)
+
+    logger.info("\n=== Server URLs for CMS Configuration ===")
+    logger.info(f"HTTP Snapshot URL: http://{local_ip}:{http_port}/snapshot")
+
+    if rtsp_process:
+        logger.info(f"RTSP Stream URL: rtsp://{local_ip}:{rtsp_port}/stream")
+    else:
+        logger.info("RTSP Stream: Not available (FFmpeg not found)")
+        logger.info("HTTP-only mode: Use Snapshot URL for camera input")
+
+    logger.info(f"Status URL: http://{local_ip}:{http_port}/status")
+    logger.info("\n=== CMS Configuration Suggestions ===")
+    logger.info("Camera Identifier: webcam-local-01")
+    logger.info(f"RTSP Stream URL: rtsp://{local_ip}:{rtsp_port}/stream")
+    logger.info(f"Snapshot URL: http://{local_ip}:{http_port}/snapshot")
+    logger.info("Snapshot Interval: 2000 (ms)")
+    logger.info("\nPress Ctrl+C to stop all servers")
+
+    try:
+        # Start HTTP server
+        http_server.serve_forever()
+    except KeyboardInterrupt:
+        logger.info("Shutting down servers...")
+    finally:
+        # Clean up
+        if webcam_cap:
+            webcam_cap.release()
+
+        if rtsp_process:
+            logger.info("Stopping RTSP stream...")
+            rtsp_process.terminate()
+            try:
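+                # Give FFmpeg up to 5 seconds to exit after terminate() before force-killing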
+                rtsp_process.wait(timeout=5)
+            except subprocess.TimeoutExpired:
+                rtsp_process.kill()
+
+        http_server.server_close()
+        logger.info("All servers stopped")
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
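A note on verifying the new server: FFmpeg's RTSP muxer normally acts as a publishing client, so the rtsp://localhost:8554/stream URL above typically only becomes playable if an RTSP server (e.g. MediaMTX) is already listening on that port, which makes the HTTP snapshot endpoint the quickest sanity check. Below is a minimal client-side polling sketch; it is not part of the diff, and the host, port, and 2000 ms interval simply mirror the defaults suggested in main(). It uses only urllib from the standard library plus OpenCV and NumPy, which the server already depends on.

#!/usr/bin/env python3
"""Minimal poller for the /snapshot endpoint exposed by webcam_rtsp_server.py."""

import time
import urllib.request

import cv2
import numpy as np

SNAPSHOT_URL = "http://127.0.0.1:8080/snapshot"  # matches http_port in main()

def fetch_snapshot(url: str, timeout: float = 5.0):
    """Fetch one JPEG snapshot and decode it to a BGR frame; return None on failure."""
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            data = resp.read()
        return cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)
    except Exception as exc:
        print(f"Snapshot fetch failed: {exc}")
        return None

if __name__ == "__main__":
    while True:
        frame = fetch_snapshot(SNAPSHOT_URL)
        if frame is None:
            time.sleep(2.0)  # back off and retry if the server is unreachable
            continue
        cv2.imshow("snapshot", frame)
        # waitKey doubles as the ~2000 ms polling delay; press 'q' to quit
        if cv2.waitKey(2000) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()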