tracking 70%

Pongsatorn 2025-08-28 11:57:15 +07:00
parent 07eddd3f0d
commit 5bf2d49e6b
3 changed files with 2042 additions and 199 deletions

app.py

@@ -43,6 +43,16 @@ camera_streams: Dict[str, Dict[str, Any]] = {}
subscription_to_camera: Dict[str, str] = {}
# Store latest frames for REST API access (separate from processing buffer)
latest_frames: Dict[str, Any] = {}
# Store cached detection dict after successful pipeline completion
cached_detections: Dict[str, Dict[str, Any]] = {}
# Track frame skipping for pipeline buffer after detection
frame_skip_flags: Dict[str, bool] = {}
# Track camera connection states for immediate error handling
camera_states: Dict[str, Dict[str, Any]] = {}
# Track session ID states and pipeline modes per camera
session_pipeline_states: Dict[str, Dict[str, Any]] = {}
# Store full pipeline results for caching
cached_full_pipeline_results: Dict[str, Dict[str, Any]] = {}
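For orientation, here is a hedged sketch of what these new per-camera containers might hold at runtime. The field names mirror the initializers added later in this commit; the camera ID and the concrete values are made up.

# Illustrative sketch, not part of the commit: example runtime contents of the per-camera state dicts.
camera_states = {
    "display-001;cam-001": {
        "connected": True,
        "last_error": None,
        "last_error_time": None,
        "consecutive_failures": 0,
        "disconnection_notified": False,
    },
}
session_pipeline_states = {
    "display-001;cam-001": {
        "mode": "validation_detecting",
        "session_id_received": False,
        "full_pipeline_completed": False,
        "absence_counter": 0,
        "max_absence_frames": 3,
    },
}
cached_full_pipeline_results = {
    "display-001;cam-001": {
        "result": {"class": "car", "confidence": 0.92, "bbox": [100, 80, 420, 360], "branch_results": {}},
        "timestamp": 1756357035.0,
    },
}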
with open("config.json", "r") as f:
config = json.load(f)
@@ -69,11 +79,22 @@ logging.basicConfig(
logger = logging.getLogger("detector_worker")
logger.setLevel(logging.DEBUG) # Set app-specific logger to DEBUG level
# Create WebSocket communication logger
ws_logger = logging.getLogger("websocket_comm")
ws_logger.setLevel(logging.INFO)
ws_handler = logging.FileHandler("websocket_comm.log", encoding='utf-8')
ws_formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
ws_handler.setFormatter(ws_formatter)
ws_logger.addHandler(ws_handler)
ws_logger.propagate = False # Don't propagate to root logger
# Ensure all other libraries (including root) use at least INFO level
logging.getLogger().setLevel(logging.INFO)
logger.info("Starting detector worker application")
logger.info(f"Configuration: Target FPS: {TARGET_FPS}, Max streams: {max_streams}, Max retries: {max_retries}")
ws_logger.info("WebSocket communication logging started - TX/RX format")
logger.info("WebSocket communication will be logged to websocket_comm.log")
# Ensure the models directory exists
os.makedirs("models", exist_ok=True)
@@ -228,6 +249,87 @@ def get_crop_coords(stream):
"cropY2": stream.get("cropY2")
}
# Camera state management functions
def set_camera_connected(camera_id, connected=True, error_msg=None):
"""Set camera connection state and track error information"""
current_time = time.time()
if camera_id not in camera_states:
camera_states[camera_id] = {
"connected": True,
"last_error": None,
"last_error_time": None,
"consecutive_failures": 0,
"disconnection_notified": False
}
state = camera_states[camera_id]
was_connected = state["connected"]
if connected:
state["connected"] = True
state["consecutive_failures"] = 0
state["disconnection_notified"] = False
if not was_connected:
logger.info(f"📶 CAMERA RECONNECTED: {camera_id}")
else:
state["connected"] = False
state["last_error"] = error_msg
state["last_error_time"] = current_time
state["consecutive_failures"] += 1
# Distinguish between temporary and permanent disconnection
is_permanent = state["consecutive_failures"] >= 3
if was_connected and is_permanent:
logger.error(f"📵 CAMERA DISCONNECTED: {camera_id} - {error_msg} (consecutive failures: {state['consecutive_failures']})")
logger.info(f"🚨 CAMERA ERROR DETECTED - Will send detection: null to reset backend session for {camera_id}")
def is_camera_connected(camera_id):
"""Check if camera is currently connected"""
return camera_states.get(camera_id, {}).get("connected", True)
def should_notify_disconnection(camera_id):
"""Check if we should notify backend about disconnection"""
state = camera_states.get(camera_id, {})
is_disconnected = not state.get("connected", True)
not_yet_notified = not state.get("disconnection_notified", False)
has_enough_failures = state.get("consecutive_failures", 0) >= 3
return is_disconnected and not_yet_notified and has_enough_failures
def mark_disconnection_notified(camera_id):
"""Mark that we've notified backend about this disconnection"""
if camera_id in camera_states:
camera_states[camera_id]["disconnection_notified"] = True
logger.debug(f"Marked disconnection notification sent for camera {camera_id}")
def get_or_init_session_pipeline_state(camera_id):
"""Get or initialize session pipeline state for a camera"""
if camera_id not in session_pipeline_states:
session_pipeline_states[camera_id] = {
"mode": "validation_detecting", # "validation_detecting", "send_detections", "waiting_for_session_id", "full_pipeline", "lightweight"
"session_id_received": False,
"full_pipeline_completed": False,
"absence_counter": 0,
"max_absence_frames": 3
# Removed validation_counter and validation_threshold - now using only track-based validation
}
return session_pipeline_states[camera_id]
def update_session_pipeline_mode(camera_id, new_mode, session_id=None):
"""Update session pipeline mode and related state"""
state = get_or_init_session_pipeline_state(camera_id)
old_mode = state["mode"]
state["mode"] = new_mode
if session_id:
state["session_id_received"] = True
state["absence_counter"] = 0 # Reset absence counter when session starts
logger.info(f"📊 Camera {camera_id}: Pipeline mode changed from '{old_mode}' to '{new_mode}'")
return state
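Taken together with the detection handler further down, these helpers form a small per-camera mode machine. The dict below is a hedged summary of the main transitions as this commit wires them up; it is illustrative only and does not exist in app.py.

# Illustrative sketch, not part of the commit: main pipeline-mode transitions per camera.
PIPELINE_TRANSITIONS = {
    "validation_detecting":   "send_detections",       # stable track IDs confirmed (or basic-detection fallback reaches its threshold)
    "send_detections":        "full_pipeline",         # backend delivers a sessionId for the display
    "waiting_for_session_id": "full_pipeline",         # backend delivers a sessionId; frames are not processed while waiting
    "full_pipeline":          "lightweight",           # full pipeline (branches + Redis + PostgreSQL) completed and its result cached
    "lightweight":            "validation_detecting",  # stable tracks absent for max_absence_frames frames; a "none" detection is sent
}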
####################################################
# REST API endpoint for image retrieval
####################################################
@@ -279,6 +381,47 @@ async def detect(websocket: WebSocket):
async def handle_detection(camera_id, stream, frame, websocket, model_tree, persistent_data):
try:
# Check camera connection state first - handle disconnection immediately
if should_notify_disconnection(camera_id):
logger.error(f"🚨 CAMERA DISCONNECTION DETECTED: {camera_id} - sending immediate detection: null")
# Clear cached detections and occupancy state
cached_detections.pop(camera_id, None)
frame_skip_flags.pop(camera_id, None)
cached_full_pipeline_results.pop(camera_id, None) # Clear cached pipeline results
session_pipeline_states.pop(camera_id, None) # Reset session pipeline state
# Reset pipeline state immediately
from siwatsystem.pympta import reset_tracking_state
model_id = stream.get("modelId", "unknown")
reset_tracking_state(camera_id, model_id, "camera disconnected")
# Send immediate detection: null to backend
detection_data = {
"type": "imageDetection",
"subscriptionIdentifier": stream["subscriptionIdentifier"],
"timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"data": {
"detection": None, # null detection for disconnection
"modelId": stream["modelId"],
"modelName": stream["modelName"]
}
}
try:
ws_logger.info(f"TX -> {json.dumps(detection_data, separators=(',', ':'))}")
await websocket.send_json(detection_data)
except RuntimeError as e:
if "websocket.close" in str(e):
logger.warning(f"WebSocket connection closed - cannot send disconnection signal for camera {camera_id}")
return persistent_data
else:
raise
mark_disconnection_notified(camera_id)
logger.info(f"📡 SENT DISCONNECTION SIGNAL - detection: null for camera {camera_id}, backend should clear session")
return persistent_data
# Apply crop if specified
cropped_frame = frame
if all(coord is not None for coord in [stream.get("cropX1"), stream.get("cropY1"), stream.get("cropX2"), stream.get("cropY2")]):
@@ -296,87 +439,318 @@ async def detect(websocket: WebSocket):
# Get backend session ID if available
backend_session_id = session_ids.get(display_identifier)
# Create context for pipeline execution with backend sessionId
# Get or initialize session pipeline state
pipeline_state = get_or_init_session_pipeline_state(camera_id)
current_mode = pipeline_state["mode"]
logger.debug(f"🔍 SESSIONID LOOKUP: display='{display_identifier}', session_id={repr(backend_session_id)}, mode='{current_mode}'")
logger.debug(f"🔍 Available session_ids: {session_ids}")
# ═══ SESSION ID-BASED PROCESSING MODE ═══
if not backend_session_id:
# No session ID - keep current mode if it's validation_detecting or send_detections
if current_mode not in ["validation_detecting", "send_detections", "waiting_for_session_id"]:
update_session_pipeline_mode(camera_id, "validation_detecting")
current_mode = "validation_detecting"
logger.debug(f"🔍 Camera {camera_id}: No session ID - in {current_mode} mode")
else:
# Session ID available - switch to full pipeline mode
if current_mode in ["send_detections", "waiting_for_session_id"]:
# Session ID just arrived - switch to full pipeline mode
update_session_pipeline_mode(camera_id, "full_pipeline", backend_session_id)
current_mode = "full_pipeline"
logger.info(f"🔥 Camera {camera_id}: Session ID received ({backend_session_id}) - switching to FULL PIPELINE mode")
# Create context for pipeline execution
pipeline_context = {
"camera_id": camera_id,
"display_id": display_identifier,
"backend_session_id": backend_session_id
}
if backend_session_id:
logger.info(f"🔥 USING BACKEND SESSION_ID: {backend_session_id} for camera {camera_id} (display: {display_identifier})")
logger.debug(f"Pipeline context includes backend session_id: {backend_session_id}")
else:
logger.debug(f"❌ No backend session_id available for display: {display_identifier} (session_ids: {session_ids})")
start_time = time.time()
detection_result = None
if current_mode == "validation_detecting":
# ═══ TRACK VALIDATION MODE ═══
# Run tracking-based validation with track ID stability
logger.debug(f"🔍 Camera {camera_id}: In validation_detecting mode - running track-based validation")
# Get tracking configuration from model_tree
tracking_config = model_tree.get("tracking", {})
tracking_enabled = tracking_config.get("enabled", True)
stability_threshold = tracking_config.get("stabilityThreshold", 4)
# Default to "none" - only proceed after track validation
detection_result = {"class": "none", "confidence": 1.0, "bbox": [0, 0, 0, 0]}
if tracking_enabled:
# Run full tracking detection to get track IDs
from siwatsystem.pympta import run_detection_with_tracking
all_detections, regions_dict, track_validation_result = run_detection_with_tracking(cropped_frame, model_tree, pipeline_context)
if track_validation_result.get("validation_complete", False):
# Track validation completed - we have stable track IDs
stable_tracks = track_validation_result.get("stable_tracks", [])
logger.info(f"🎯 Camera {camera_id}: TRACK VALIDATION COMPLETED - stable tracks: {stable_tracks}")
# Switch to send_detections mode
update_session_pipeline_mode(camera_id, "send_detections")
# Send the best detection with stable track
if all_detections:
# Find detection with stable track ID
stable_detection = None
for detection in all_detections:
if detection.get("id") in stable_tracks:
stable_detection = detection
break
if stable_detection:
detection_result = {
"class": stable_detection.get("class", "car"),
"confidence": stable_detection.get("confidence", 0.0),
"bbox": stable_detection.get("bbox", [0, 0, 0, 0]),
"track_id": stable_detection.get("id")
}
logger.info(f"🚗 Camera {camera_id}: SENDING STABLE DETECTION - track ID {detection_result['track_id']}")
else:
logger.warning(f"⚠️ Camera {camera_id}: Stable tracks found but no matching detection")
else:
# Track validation still in progress
stable_tracks = track_validation_result.get("stable_tracks", [])
current_tracks = track_validation_result.get("current_tracks", [])
if current_tracks:
track_id = current_tracks[0] if current_tracks else "None"
stable_status = "STABLE" if stable_tracks else "validating"
logger.info(f"🔍 Camera {camera_id}: TRACK VALIDATION - car track_id {track_id} ({stable_status}, need {stability_threshold} consecutive frames)")
else:
logger.debug(f"👻 Camera {camera_id}: No car detected")
logger.debug(f"📤 Camera {camera_id}: Sending 'none' (track validation in progress)")
else:
# Tracking disabled - fall back to basic detection validation
logger.debug(f"🔍 Camera {camera_id}: Tracking disabled - using basic detection validation")
from siwatsystem.pympta import run_lightweight_detection
basic_detection = run_lightweight_detection(cropped_frame, model_tree)
if basic_detection and basic_detection.get("car_detected"):
best_detection = basic_detection.get("best_detection")
# Increment validation counter for basic detection (these keys are created lazily here
# because they are no longer part of the default session pipeline state)
pipeline_state["validation_counter"] = pipeline_state.get("validation_counter", 0) + 1
current_count = pipeline_state["validation_counter"]
threshold = pipeline_state.get("validation_threshold", stability_threshold)
if current_count >= threshold:
update_session_pipeline_mode(camera_id, "send_detections")
detection_result = {
"class": best_detection.get("class", "car"),
"confidence": best_detection.get("confidence", 0.0),
"bbox": best_detection.get("bbox", [0, 0, 0, 0])
}
logger.info(f"🎯 Camera {camera_id}: BASIC VALIDATION COMPLETED after {current_count} frames")
else:
logger.info(f"📊 Camera {camera_id}: Basic validation progress {current_count}/{threshold}")
else:
# Reset validation counter
if pipeline_state["validation_counter"] > 0:
pipeline_state["validation_counter"] = 0
logger.info(f"🔄 Camera {camera_id}: Reset validation counter (no detection)")
elif current_mode == "send_detections":
# ═══ SEND DETECTIONS MODE ═══
# Validation completed - now send detection_dict for car detections, detection: null for no car
logger.debug(f"📤 Camera {camera_id}: In send_detections mode - sending detection_dict for cars")
from siwatsystem.pympta import run_lightweight_detection
basic_detection = run_lightweight_detection(cropped_frame, model_tree)
if basic_detection and basic_detection.get("car_detected"):
# Car detected - send detection_dict
best_detection = basic_detection.get("best_detection")
detection_result = {
"class": best_detection.get("class", "car"),
"confidence": best_detection.get("confidence", 0.0),
"bbox": best_detection.get("bbox", [0, 0, 0, 0])
}
logger.info(f"🚗 Camera {camera_id}: SENDING DETECTION_DICT - {detection_result['class']} (conf={detection_result['confidence']:.3f}) - backend should generate session ID")
else:
# No car detected - send "none"
detection_result = {"class": "none", "confidence": 1.0, "bbox": [0, 0, 0, 0]}
logger.debug(f"👻 Camera {camera_id}: No car detected - sending 'none'")
elif current_mode == "waiting_for_session_id":
# ═══ WAITING FOR SESSION ID MODE ═══
# Stop processing snapshots, wait for session ID
logger.debug(f"⏳ Camera {camera_id}: In waiting_for_session_id mode - not processing snapshots")
return persistent_data # Don't process or send anything
elif current_mode == "full_pipeline":
# ═══ FULL PIPELINE MODE ═══
logger.info(f"🔥 Camera {camera_id}: Running FULL PIPELINE (detection + branches + Redis + PostgreSQL)")
detection_result = run_pipeline(cropped_frame, model_tree, context=pipeline_context)
if detection_result and isinstance(detection_result, dict):
# Cache the full pipeline result
cached_full_pipeline_results[camera_id] = {
"result": detection_result.copy(),
"timestamp": time.time()
}
# Switch to lightweight mode
update_session_pipeline_mode(camera_id, "lightweight")
logger.info(f"✅ Camera {camera_id}: Full pipeline completed - switching to LIGHTWEIGHT mode")
elif current_mode == "lightweight":
# ═══ LIGHTWEIGHT MODE ═══
# Use tracking to check for stable car presence
from siwatsystem.pympta import run_detection_with_tracking
all_detections, regions_dict, track_validation_result = run_detection_with_tracking(cropped_frame, model_tree, pipeline_context)
stable_tracks = track_validation_result.get("stable_tracks", [])
current_tracks = track_validation_result.get("current_tracks", [])
stable_tracks_present = bool(set(stable_tracks) & set(current_tracks))
if stable_tracks_present:
# Stable tracked car still present - use cached result
pipeline_state["absence_counter"] = 0
if camera_id in cached_full_pipeline_results:
detection_result = cached_full_pipeline_results[camera_id]["result"]
logger.debug(f"🔄 Camera {camera_id}: Stable tracked car still present - using cached detection")
else:
logger.warning(f"⚠️ Camera {camera_id}: Stable tracked car detected but no cached result available")
detection_result = None
else:
# No stable tracked cars - increment absence counter
pipeline_state["absence_counter"] += 1
absence_count = pipeline_state["absence_counter"]
max_absence = pipeline_state["max_absence_frames"]
logger.debug(f"👻 Camera {camera_id}: No stable tracked cars - absence {absence_count}/{max_absence}")
if absence_count >= max_absence:
# Send "none" detection and reset to validation mode
detection_result = {
"class": "none",
"confidence": 1.0,
"bbox": [0, 0, 0, 0],
"branch_results": {}
}
cached_full_pipeline_results.pop(camera_id, None) # Clear cache
update_session_pipeline_mode(camera_id, "validation_detecting")
logger.info(f"📤 Camera {camera_id}: Stable tracked cars absent for {absence_count} frames - sending 'none' and resetting to track validation")
else:
# Still within absence tolerance - use cached result
if camera_id in cached_full_pipeline_results:
detection_result = cached_full_pipeline_results[camera_id]["result"]
logger.debug(f"⏳ Camera {camera_id}: Stable tracked cars absent {absence_count}/{max_absence} - still using cached detection")
else:
detection_result = None
detection_result = run_pipeline(cropped_frame, model_tree, context=pipeline_context)
process_time = (time.time() - start_time) * 1000
logger.debug(f"Detection for camera {camera_id} completed in {process_time:.2f}ms")
logger.debug(f"Detection for camera {camera_id} completed in {process_time:.2f}ms (mode: {current_mode})")
# Skip processing if no detection result (blocked by session gating)
if detection_result is None:
logger.debug(f"No detection result to process for camera {camera_id}")
return persistent_data
# Log the raw detection result for debugging
logger.debug(f"Raw detection result for camera {camera_id}:\n{json.dumps(detection_result, indent=2, default=str)}")
# Extract session_id from pipeline result (uses backend sessionId)
session_id = None
if detection_result and isinstance(detection_result, dict):
# Check if pipeline used backend session_id for operations
if "session_id" in detection_result:
session_id = detection_result["session_id"]
logger.debug(f"Pipeline used session_id: {session_id}")
elif backend_session_id:
# Use backend session_id even if pipeline didn't return it
session_id = backend_session_id
logger.debug(f"Using backend session_id for WebSocket response: {session_id}")
# Extract session_id from pipeline result (always use backend sessionId)
session_id = backend_session_id
logger.debug(f"Using backend session_id: {session_id}")
# Process detection result - run_pipeline returns the primary detection directly
if detection_result and isinstance(detection_result, dict) and "class" in detection_result:
highest_confidence_detection = detection_result
# Process detection result based on current mode
if current_mode == "validation_detecting":
# ═══ VALIDATION DETECTING MODE ═══
# Always send detection: null during validation phase
detection_dict = None
logger.debug(f"🔍 SENDING 'NONE' - validation_detecting mode for camera {camera_id}")
elif current_mode == "send_detections":
# ═══ SEND DETECTIONS MODE ═══
if detection_result.get("class") == "none":
# No car detected - send detection: null
detection_dict = None
logger.debug(f"📤 SENDING 'NONE' - send_detections mode (no car) for camera {camera_id}")
else:
# Car detected - check if we have sessionId to determine what to send
if backend_session_id:
# Have sessionId - send full detection_dict for database updates
detection_dict = {
"carModel": None,
"carBrand": None,
"carYear": None,
"bodyType": None,
"licensePlateText": None,
"licensePlateConfidence": None
}
logger.info(f"📤 SENDING FULL DETECTION_DICT - send_detections mode with sessionId {backend_session_id} (conf={detection_result.get('confidence', 0):.3f}) for camera {camera_id}")
else:
# No sessionId - send empty detection_dict to trigger backend to generate sessionId
detection_dict = {}
logger.info(f"📤 SENDING EMPTY DETECTION_DICT - send_detections mode without sessionId, triggering backend to generate sessionId (conf={detection_result.get('confidence', 0):.3f}) for camera {camera_id}")
elif detection_result.get("class") == "none":
# "None" detection in other modes (lightweight) - car left or absent for 3 frames
detection_dict = None
logger.info(f"📤 SENDING 'NONE' (detection: null) - Car absent, expecting backend to clear session for camera {camera_id}")
else:
# No detection found
highest_confidence_detection = {
"class": "none",
"confidence": 1.0,
"bbox": [0, 0, 0, 0],
"branch_results": {}
# Valid detection - convert to backend format
detection_dict = {
"carModel": None,
"carBrand": None,
"carYear": None,
"bodyType": None,
"licensePlateText": None,
"licensePlateConfidence": None
}
# Convert detection format to match backend expectations exactly as in worker.md section 4.2
detection_dict = {
"carModel": None,
"carBrand": None,
"carYear": None,
"bodyType": None,
"licensePlateText": None,
"licensePlateConfidence": None
}
# Extract and process branch results from parallel classification
branch_results = highest_confidence_detection.get("branch_results", {})
if branch_results:
logger.debug(f"Processing branch results: {branch_results}")
# Transform branch results into backend-expected detection attributes
for branch_id, branch_data in branch_results.items():
if isinstance(branch_data, dict):
logger.debug(f"Processing branch {branch_id}: {branch_data}")
# Map common classification fields to backend-expected names
if "brand" in branch_data:
detection_dict["carBrand"] = branch_data["brand"]
if "body_type" in branch_data:
detection_dict["bodyType"] = branch_data["body_type"]
if "class" in branch_data:
class_name = branch_data["class"]
# Extract and process branch results from parallel classification (only for valid detections)
if detection_result.get("class") != "none" and "branch_results" in detection_result:
def process_branch_results(branch_results, depth=0):
"""Recursively process branch results including nested branches."""
if not isinstance(branch_results, dict):
return
indent = " " * depth
for branch_id, branch_data in branch_results.items():
if isinstance(branch_data, dict):
logger.debug(f"{indent}Processing branch {branch_id}: {branch_data}")
# Map based on branch/model type
if "brand" in branch_id.lower():
detection_dict["carBrand"] = class_name
elif "bodytype" in branch_id.lower() or "body" in branch_id.lower():
detection_dict["bodyType"] = class_name
logger.info(f"Detection payload after branch processing: {detection_dict}")
else:
logger.debug("No branch results found in detection result")
# Map common classification fields to backend-expected names
if "brand" in branch_data:
detection_dict["carBrand"] = branch_data["brand"]
logger.debug(f"{indent}Mapped carBrand: {branch_data['brand']}")
if "body_type" in branch_data:
detection_dict["bodyType"] = branch_data["body_type"]
logger.debug(f"{indent}Mapped bodyType: {branch_data['body_type']}")
if "class" in branch_data:
class_name = branch_data["class"]
# Map based on branch/model type
if "brand" in branch_id.lower():
detection_dict["carBrand"] = class_name
logger.debug(f"{indent}Mapped carBrand from class: {class_name}")
elif "bodytype" in branch_id.lower() or "body" in branch_id.lower():
detection_dict["bodyType"] = class_name
logger.debug(f"{indent}Mapped bodyType from class: {class_name}")
# Process nested branch results recursively
if "branch_results" in branch_data:
logger.debug(f"{indent}Processing nested branches in {branch_id}")
process_branch_results(branch_data["branch_results"], depth + 1)
branch_results = detection_result.get("branch_results", {})
if branch_results:
logger.debug(f"Processing branch results: {branch_results}")
process_branch_results(branch_results)
logger.info(f"Detection payload after branch processing: {detection_dict}")
else:
logger.debug("No branch results found in detection result")
detection_data = {
"type": "imageDetection",
@@ -390,25 +764,38 @@ async def detect(websocket: WebSocket):
}
}
# Add session ID if available (from backend or pipeline operations)
if session_id is not None:
# Add session ID to detection data (NOT for "none" detections - backend uses absence of sessionId to know to clear the session)
if session_id and detection_result.get("class") != "none":
detection_data["sessionId"] = session_id
logger.debug(f"Including sessionId {session_id} in WebSocket message")
elif detection_result.get("class") == "none":
logger.debug(f"NOT including sessionId in 'none' detection - backend should clear session")
# Log detection details
if detection_result.get("class") != "none":
confidence = detection_result.get("confidence", 0.0)
logger.info(f"Camera {camera_id}: Detected {detection_result['class']} with confidence {confidence:.2f} using model {stream['modelName']}")
# Send detection data to backend (session gating handled above in processing logic)
logger.debug(f"📤 SENDING TO BACKEND for camera {camera_id}: {json.dumps(detection_data, indent=2)}")
try:
ws_logger.info(f"TX -> {json.dumps(detection_data, separators=(',', ':'))}")
await websocket.send_json(detection_data)
logger.debug(f"Sent detection data to client for camera {camera_id}")
except RuntimeError as e:
if "websocket.close" in str(e):
logger.warning(f"WebSocket connection closed - cannot send detection data for camera {camera_id}")
return persistent_data
else:
raise
# Log status after sending
if session_id and detection_result.get("class") != "none":
logger.info(f"📤 WEBSOCKET RESPONSE with sessionId: {session_id} for camera {camera_id}")
logger.debug(f"Added session_id to WebSocket response: {session_id}")
elif detection_result.get("class") == "none":
logger.info(f"📡 SENT 'none' detection - backend should clear session for camera {camera_id}")
else:
logger.debug(f"📤 WebSocket response WITHOUT sessionId for camera {camera_id}")
if highest_confidence_detection.get("class") != "none":
confidence = highest_confidence_detection.get("confidence", 0.0)
logger.info(f"Camera {camera_id}: Detected {highest_confidence_detection['class']} with confidence {confidence:.2f} using model {stream['modelName']}")
# Log session ID if available
if session_id:
logger.debug(f"Detection associated with session ID: {session_id}")
await websocket.send_json(detection_data)
logger.debug(f"Sent detection data to client for camera {camera_id}")
logger.debug(f"Sent this detection data: {detection_data}")
logger.info(f"📡 Detection data sent for camera {camera_id}")
return persistent_data
except Exception as e:
logger.error(f"Error in handle_detection for camera {camera_id}: {str(e)}", exc_info=True)
@@ -427,8 +814,10 @@ async def detect(websocket: WebSocket):
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
logger.info(f"Camera {camera_id} opened successfully with resolution {width}x{height}, FPS: {fps}")
set_camera_connected(camera_id, True)
else:
logger.error(f"Camera {camera_id} failed to open initially")
set_camera_connected(camera_id, False, "Failed to open camera initially")
while not stop_event.is_set():
try:
@@ -443,20 +832,25 @@ async def detect(websocket: WebSocket):
ret, frame = cap.read()
if not ret:
logger.warning(f"Connection lost for camera: {camera_id}, retry {retries+1}/{max_retries}")
error_msg = f"Connection lost for camera: {camera_id}, retry {retries+1}/{max_retries}"
logger.warning(error_msg)
set_camera_connected(camera_id, False, error_msg)
cap.release()
time.sleep(reconnect_interval)
retries += 1
if retries > max_retries and max_retries != -1:
logger.error(f"Max retries reached for camera: {camera_id}, stopping frame reader")
set_camera_connected(camera_id, False, "Max retries reached")
break
# Re-open
logger.info(f"Attempting to reopen RTSP stream for camera: {camera_id}")
cap = cv2.VideoCapture(streams[camera_id]["rtsp_url"])
if not cap.isOpened():
logger.error(f"Failed to reopen RTSP stream for camera: {camera_id}")
set_camera_connected(camera_id, False, "Failed to reopen RTSP stream")
continue
logger.info(f"Successfully reopened RTSP stream for camera: {camera_id}")
set_camera_connected(camera_id, True)
continue
# Successfully read a frame
@@ -470,6 +864,7 @@ async def detect(websocket: WebSocket):
logger.debug(f"Successfully read frame from camera {camera_id}, shape: {frame.shape}")
retries = 0
set_camera_connected(camera_id, True) # Mark as connected on successful frame read
# Overwrite old frame if buffer is full
if not buffer.empty():
@@ -485,21 +880,28 @@ async def detect(websocket: WebSocket):
time.sleep(0.01)
except cv2.error as e:
logger.error(f"OpenCV error for camera {camera_id}: {e}", exc_info=True)
error_msg = f"OpenCV error for camera {camera_id}: {e}"
logger.error(error_msg, exc_info=True)
set_camera_connected(camera_id, False, error_msg)
cap.release()
time.sleep(reconnect_interval)
retries += 1
if retries > max_retries and max_retries != -1:
logger.error(f"Max retries reached after OpenCV error for camera {camera_id}")
set_camera_connected(camera_id, False, "Max retries reached after OpenCV error")
break
logger.info(f"Attempting to reopen RTSP stream after OpenCV error for camera: {camera_id}")
cap = cv2.VideoCapture(streams[camera_id]["rtsp_url"])
if not cap.isOpened():
logger.error(f"Failed to reopen RTSP stream for camera {camera_id} after OpenCV error")
set_camera_connected(camera_id, False, "Failed to reopen after OpenCV error")
continue
logger.info(f"Successfully reopened RTSP stream after OpenCV error for camera: {camera_id}")
set_camera_connected(camera_id, True)
except Exception as e:
logger.error(f"Unexpected error for camera {camera_id}: {str(e)}", exc_info=True)
error_msg = f"Unexpected error for camera {camera_id}: {str(e)}"
logger.error(error_msg, exc_info=True)
set_camera_connected(camera_id, False, error_msg)
cap.release()
break
except Exception as e:
@@ -517,6 +919,9 @@ async def detect(websocket: WebSocket):
frame_count = 0
last_log_time = time.time()
# Initialize camera state
set_camera_connected(camera_id, True)
try:
interval_seconds = snapshot_interval / 1000.0 # Convert milliseconds to seconds
logger.info(f"Snapshot interval for camera {camera_id}: {interval_seconds}s")
@@ -528,7 +933,9 @@ async def detect(websocket: WebSocket):
if frame is None:
consecutive_failures += 1
logger.warning(f"Failed to fetch snapshot for camera: {camera_id}, consecutive failures: {consecutive_failures}")
error_msg = f"Failed to fetch snapshot for camera: {camera_id}, consecutive failures: {consecutive_failures}"
logger.warning(error_msg)
set_camera_connected(camera_id, False, error_msg)
retries += 1
# Check network connectivity with a simple ping-like test
@@ -541,6 +948,7 @@ async def detect(websocket: WebSocket):
if retries > max_retries and max_retries != -1:
logger.error(f"Max retries reached for snapshot camera: {camera_id}, stopping reader")
set_camera_connected(camera_id, False, "Max retries reached for snapshot camera")
break
# Exponential backoff based on consecutive failures
@@ -562,6 +970,7 @@ async def detect(websocket: WebSocket):
logger.debug(f"Successfully fetched snapshot from camera {camera_id}, shape: {frame.shape}")
retries = 0
set_camera_connected(camera_id, True) # Mark as connected on successful snapshot
# Overwrite old frame if buffer is full
if not buffer.empty():
@@ -581,10 +990,13 @@ async def detect(websocket: WebSocket):
except Exception as e:
consecutive_failures += 1
logger.error(f"Unexpected error fetching snapshot for camera {camera_id}: {str(e)}", exc_info=True)
error_msg = f"Unexpected error fetching snapshot for camera {camera_id}: {str(e)}"
logger.error(error_msg, exc_info=True)
set_camera_connected(camera_id, False, error_msg)
retries += 1
if retries > max_retries and max_retries != -1:
logger.error(f"Max retries reached after error for snapshot camera {camera_id}")
set_camera_connected(camera_id, False, "Max retries reached after error")
break
# Exponential backoff for exceptions too
@@ -769,6 +1181,23 @@ async def detect(websocket: WebSocket):
streams[camera_id] = stream_info
subscription_to_camera[camera_id] = camera_url
logger.info(f"Subscribed to camera {camera_id}")
# Send initial "none" detection to backend on camera connect
initial_detection_data = {
"type": "imageDetection",
"subscriptionIdentifier": subscriptionIdentifier,
"timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"data": {
"detection": None,
"modelId": modelId,
"modelName": modelName
}
}
ws_logger.info(f"TX -> {json.dumps(initial_detection_data, separators=(',', ':'))}")
await websocket.send_json(initial_detection_data)
logger.info(f"📡 Sent initial 'none' detection to backend for camera {camera_id}")
logger.debug(f"Initial detection data: {initial_detection_data}")
async def unsubscribe_internal(subscription_id):
"""Internal unsubscription logic"""
@@ -788,6 +1217,11 @@ async def detect(websocket: WebSocket):
del camera_streams[camera_url]
latest_frames.pop(subscription_id, None)
cached_detections.pop(subscription_id, None) # Clear cached detection
frame_skip_flags.pop(subscription_id, None) # Clear frame skip flag
camera_states.pop(subscription_id, None) # Clear camera state
cached_full_pipeline_results.pop(subscription_id, None) # Clear cached pipeline results
session_pipeline_states.pop(subscription_id, None) # Clear session pipeline state
cleanup_camera_stability(subscription_id)
logger.info(f"Unsubscribed from camera {subscription_id}")
@@ -886,6 +1320,7 @@ async def detect(websocket: WebSocket):
while True:
try:
msg = await websocket.receive_text()
ws_logger.info(f"RX <- {msg}")
logger.debug(f"Received message: {msg}")
data = json.loads(msg)
msg_type = data.get("type")
@@ -949,6 +1384,7 @@ async def detect(websocket: WebSocket):
"subscriptionIdentifier": subscriptionIdentifier,
"error": f"Failed to download model from {model_url}"
}
ws_logger.info(f"TX -> {json.dumps(error_response, separators=(',', ':'))}")
await websocket.send_json(error_response)
continue
model_tree = load_pipeline_from_zip(local_path, extraction_dir)
@@ -963,6 +1399,7 @@ async def detect(websocket: WebSocket):
"subscriptionIdentifier": subscriptionIdentifier,
"error": f"Model file not found: {model_url}"
}
ws_logger.info(f"TX -> {json.dumps(error_response, separators=(',', ':'))}")
await websocket.send_json(error_response)
continue
model_tree = load_pipeline_from_zip(model_url, extraction_dir)
@@ -1105,6 +1542,9 @@ async def detect(websocket: WebSocket):
# Clean up cached frame and stability tracking
latest_frames.pop(camera_id, None)
cached_detections.pop(camera_id, None) # Clear cached detection
frame_skip_flags.pop(camera_id, None) # Clear frame skip flag
camera_states.pop(camera_id, None) # Clear camera state
cleanup_camera_stability(camera_id)
logger.info(f"Unsubscribed from camera {camera_id}")
# Note: Keep models in memory for potential reuse
@@ -1148,33 +1588,67 @@ async def detect(websocket: WebSocket):
display_identifier = payload.get("displayIdentifier")
session_id = payload.get("sessionId")
logger.info(f"🆔 BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId={session_id}")
# Debug sessionId value types and contents
session_id_type = type(session_id).__name__
if session_id is None:
logger.info(f"🆔 BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId=None (type: {session_id_type})")
logger.info(f"🔄 BACKEND WANTS TO CLEAR SESSION for display {display_identifier}")
elif session_id == "null":
logger.info(f"🆔 BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId='null' (type: {session_id_type})")
logger.info(f"🔄 BACKEND SENT STRING 'null' for display {display_identifier}")
elif session_id == "":
logger.info(f"🆔 BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId='' (empty string, type: {session_id_type})")
logger.info(f"🔄 BACKEND SENT EMPTY STRING for display {display_identifier}")
else:
logger.info(f"🆔 BACKEND SESSIONID RECEIVED: displayId={display_identifier}, sessionId='{session_id}' (type: {session_id_type}, length: {len(str(session_id))})")
logger.info(f"🔄 BACKEND CREATED/UPDATED SESSION: {session_id} for display {display_identifier}")
logger.debug(f"Full setSessionId payload: {payload}")
logger.debug(f"WebSocket message raw data: {json.dumps(data, indent=2)}")
logger.debug(f"Current active cameras: {list(streams.keys())}")
if display_identifier:
# Store session ID for this display
if session_id is None:
if session_id is None or session_id == "null" or session_id == "":
old_session_id = session_ids.get(display_identifier)
session_ids.pop(display_identifier, None)
logger.info(f"🚫 BACKEND ENDED SESSION: Cleared session ID for display {display_identifier} - resetting tracking")
if session_id is None:
logger.info(f"🚫 BACKEND ENDED SESSION: Cleared session ID for display {display_identifier} (was: {old_session_id}) - received None")
elif session_id == "null":
logger.info(f"🚫 BACKEND ENDED SESSION: Cleared session ID for display {display_identifier} (was: {old_session_id}) - received string 'null'")
elif session_id == "":
logger.info(f"🚫 BACKEND ENDED SESSION: Cleared session ID for display {display_identifier} (was: {old_session_id}) - received empty string")
logger.debug(f"Session IDs after clearing: {session_ids}")
# Reset tracking state for all cameras associated with this display
with streams_lock:
affected_cameras = []
for camera_id, stream in streams.items():
if stream["subscriptionIdentifier"].startswith(display_identifier + ";"):
affected_cameras.append(camera_id)
# Import here to avoid circular import
from siwatsystem.pympta import reset_tracking_state
model_id = stream.get("modelId", "unknown")
reset_tracking_state(camera_id, model_id, "backend session ended")
logger.info(f"Reset tracking for camera {camera_id} (display: {display_identifier})")
logger.debug(f"Reset tracking for {len(affected_cameras)} cameras: {affected_cameras}")
else:
old_session_id = session_ids.get(display_identifier)
session_ids[display_identifier] = session_id
logger.info(f"✅ BACKEND SESSION STARTED: Set session ID {session_id} for display {display_identifier}")
logger.debug(f"Current session_ids dict: {session_ids}")
logger.info(f"✅ BACKEND SESSION STARTED: Set session ID {session_id} for display {display_identifier} (previous: {old_session_id})")
logger.debug(f"Session IDs after update: {session_ids}")
logger.debug(f"🎯 CMS Backend created sessionId {session_id} after receiving detection data")
# Clear waiting state for cameras associated with this display
with streams_lock:
affected_cameras = []
for camera_id, stream in streams.items():
if stream["subscriptionIdentifier"].startswith(display_identifier + ";"):
affected_cameras.append(camera_id)
from siwatsystem.pympta import get_camera_stability_data
model_id = stream.get("modelId", "unknown")
stability_data = get_camera_stability_data(camera_id, model_id)
@@ -1182,7 +1656,11 @@ async def detect(websocket: WebSocket):
if session_state.get("waiting_for_backend_session", False):
session_state["waiting_for_backend_session"] = False
session_state["wait_start_time"] = 0.0
logger.info(f"🚀 PIPELINE UNBLOCKED: Backend sessionId {session_id} received - camera {camera_id} can proceed with pipeline")
logger.info(f"🚀 PIPELINE UNBLOCKED: Backend sessionId {session_id} received - camera {camera_id} can proceed with database operations")
logger.debug(f"📋 Camera {camera_id}: SessionId {session_id} now available for future database operations")
logger.debug(f"Updated session state for {len(affected_cameras)} cameras: {affected_cameras}")
else:
logger.warning(f"🚨 Invalid setSessionId message: missing displayIdentifier in payload")
elif msg_type == "patchSession":
session_id = data.get("sessionId")
@@ -1197,6 +1675,7 @@ async def detect(websocket: WebSocket):
"message": "Session patch acknowledged"
}
}
ws_logger.info(f"TX -> {json.dumps(response, separators=(',', ':'))}")
await websocket.send_json(response)
logger.info(f"Acknowledged patch for session {session_id}")
@@ -1241,5 +1720,10 @@ async def detect(websocket: WebSocket):
with models_lock:
models.clear()
latest_frames.clear()
cached_detections.clear()
frame_skip_flags.clear()
camera_states.clear()
cached_full_pipeline_results.clear()
session_pipeline_states.clear()
session_ids.clear()
logger.info("WebSocket connection closed")