Successful

This commit is contained in:
Pongsatorn 2025-08-29 02:13:22 +07:00
parent 39394caa8e
commit 5875b76d74
3 changed files with 846 additions and 0 deletions

438
app.py
View file

@@ -17,6 +17,7 @@ import ssl
import urllib3
import subprocess
import tempfile
import redis
from urllib.parse import urlparse
from requests.adapters import HTTPAdapter
from urllib3.util.ssl_ import create_urllib3_context
@@ -45,6 +46,10 @@ subscription_to_camera: Dict[str, str] = {}
# camera_id -> most recent frame data (presumably raw frames; confirm with the stream reader)
latest_frames: Dict[str, Any] = {}
# Store cached detection dict after successful pipeline completion
cached_detections: Dict[str, Dict[str, Any]] = {}
# Enhanced caching system for LPR integration
session_detections: Dict[str, Dict[str, Any]] = {} # session_id -> detection data
session_to_camera: Dict[str, str] = {} # session_id -> camera_id
detection_timestamps: Dict[str, float] = {} # session_id -> timestamp (for cleanup)
# Track frame skipping for pipeline buffer after detection
frame_skip_flags: Dict[str, bool] = {}
# Track camera connection states for immediate error handling
@@ -102,6 +107,335 @@ logger.info("Ensured models directory exists")
# Constants for heartbeat and timeouts
HEARTBEAT_INTERVAL = 2 # seconds
# Global Redis connection for LPR integration
redis_client_global = None  # shared redis.Redis client, lazily created by create_redis_connection()
lpr_listener_thread = None  # daemon thread running license_results_listener()
cleanup_timer_thread = None  # daemon thread running cleanup_timer()
lpr_integration_started = False  # one-shot guard so start_lpr_integration() runs only once
# Redis connection helper functions
def get_redis_config_from_model(camera_id: str) -> Dict[str, Any]:
    """Extract Redis configuration from loaded model pipeline.

    First tries to reuse the connection kwargs of a Redis client already
    attached to one of the camera's loaded model trees; if none is found,
    falls back to scanning models/**/pipeline.json for a "redis" section.

    Returns a dict with host/port/password/db keys, or None when no
    configuration can be located.
    """
    try:
        for model_id, model_tree in models.get(camera_id, {}).items():
            if hasattr(model_tree, 'get') and 'redis_client' in model_tree:
                # Extract config from existing Redis client
                client = model_tree['redis_client']
                if client:
                    kwargs = client.connection_pool.connection_kwargs
                    return {
                        'host': kwargs['host'],
                        'port': kwargs['port'],
                        'password': kwargs.get('password'),
                        'db': kwargs.get('db', 0)
                    }
    except Exception as e:
        logger.debug(f"Could not extract Redis config from model: {e}")
    # Fallback - try to read from pipeline.json directly
    try:
        models_dir = "models"
        if os.path.exists(models_dir):
            for root, dirs, files in os.walk(models_dir):
                if "pipeline.json" in files:
                    with open(os.path.join(root, "pipeline.json"), 'r') as f:
                        config = json.load(f)
                    if 'redis' in config:
                        return config['redis']
    except Exception as e:
        logger.debug(f"Could not read Redis config from pipeline.json: {e}")
    return None
def create_redis_connection() -> redis.Redis:
    """Create (or reuse) the global Redis connection used for LPR.

    Reuses ``redis_client_global`` when it still answers PING; otherwise
    builds a new client from the first Redis config found in any loaded
    model pipeline. Returns the connected client, or None when no config
    is available or the connection attempt fails.
    """
    global redis_client_global
    # Reuse the cached client if it is still alive.
    if redis_client_global is not None:
        try:
            redis_client_global.ping()
            return redis_client_global
        except Exception:
            # Stale/dead connection - drop it and rebuild below.
            # (Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.)
            redis_client_global = None
    # Find any camera with a loaded model to get Redis config
    redis_config = None
    for camera_id in models.keys():
        redis_config = get_redis_config_from_model(camera_id)
        if redis_config:
            break
    if not redis_config:
        logger.error("No Redis configuration found in any loaded models")
        return None
    try:
        redis_client_global = redis.Redis(
            host=redis_config['host'],
            port=redis_config['port'],
            password=redis_config.get('password'),
            db=redis_config.get('db', 0),
            decode_responses=True,
            socket_connect_timeout=5,
            socket_timeout=5
        )
        redis_client_global.ping()
        logger.info(f"✅ Connected to Redis for LPR at {redis_config['host']}:{redis_config['port']}")
        return redis_client_global
    except Exception as e:
        logger.error(f"❌ Failed to connect to Redis for LPR: {e}")
        redis_client_global = None
        return None
# LPR Integration Functions
def process_license_result(lpr_data: Dict[str, Any]):
    """Process incoming LPR result and update backend caches.

    Expects ``lpr_data`` to carry ``session_id`` and ``license_character``
    keys. Looks up the detection cached under that session id and writes
    the recognized plate text into all cache layers: ``session_detections``,
    ``cached_detections`` and (when present) the camera's entry in
    ``session_pipeline_states``, so that lightweight pipeline mode reports
    the updated licensePlateText. All failures are logged and swallowed;
    the function always returns None.
    """
    try:
        # Enhanced debugging for LPR data reception
        logger.info("=" * 60)
        logger.info("🚗 LPR SERVICE DATA RECEIVED")
        logger.info("=" * 60)
        logger.info(f"📥 Raw LPR data: {json.dumps(lpr_data, indent=2)}")
        session_id = str(lpr_data.get('session_id', ''))
        license_text = lpr_data.get('license_character', '')
        logger.info(f"🔍 Extracted session_id: '{session_id}'")
        logger.info(f"🔍 Extracted license_character: '{license_text}'")
        logger.info(f"📊 Current cached sessions count: {len(session_detections)}")
        logger.info(f"📊 Available session IDs: {list(session_detections.keys())}")
        # Find cached detection by session_id
        if session_id not in session_detections:
            # Unknown session: log diagnostics and bail out without updating anything.
            logger.warning("❌ LPR SESSION ID NOT FOUND!")
            logger.warning(f" Looking for session_id: '{session_id}'")
            logger.warning(f" Available sessions: {list(session_detections.keys())}")
            logger.warning(f" Session count: {len(session_detections)}")
            # Additional debugging - show session timestamps
            if session_detections:
                logger.warning("📅 Available session details:")
                for sid, timestamp in detection_timestamps.items():
                    age = time.time() - timestamp
                    camera = session_to_camera.get(sid, 'unknown')
                    logger.warning(f" Session {sid}: camera={camera}, age={age:.1f}s")
            else:
                logger.warning(" No cached sessions available - worker may not have processed any detections yet")
            logger.warning("💡 Possible causes:")
            logger.warning(" 1. Session expired (TTL: 10 minutes)")
            logger.warning(" 2. Session ID mismatch between detection and LPR service")
            logger.warning(" 3. Detection was not cached (no sessionId from backend)")
            logger.warning(" 4. Worker restarted after detection but before LPR result")
            return
        # Get the original detection data
        # Work on a copy so a malformed update cannot corrupt the cached entry.
        detection_data = session_detections[session_id].copy()
        camera_id = session_to_camera.get(session_id, 'unknown')
        logger.info("✅ LPR SESSION FOUND!")
        logger.info(f" 📹 Camera ID: {camera_id}")
        logger.info(f" ⏰ Session age: {time.time() - detection_timestamps.get(session_id, 0):.1f} seconds")
        # Show original detection structure before update
        original_license = detection_data.get('data', {}).get('detection', {}).get('licensePlateText')
        logger.info(f" 🔍 Original licensePlateText: {original_license}")
        logger.info(f" 🆕 New licensePlateText: '{license_text}'")
        # Update licensePlateText in detection
        if 'data' in detection_data and 'detection' in detection_data['data']:
            detection_data['data']['detection']['licensePlateText'] = license_text
            logger.info("🎯 LICENSE PLATE UPDATE SUCCESS!")
            logger.info(f" ✅ Updated detection for session {session_id}")
            logger.info(f" ✅ Set licensePlateText = '{license_text}'")
            # Show full detection structure after update
            detection_dict = detection_data['data']['detection']
            logger.info("📋 Updated detection dictionary:")
            logger.info(f" carModel: {detection_dict.get('carModel')}")
            logger.info(f" carBrand: {detection_dict.get('carBrand')}")
            logger.info(f" bodyType: {detection_dict.get('bodyType')}")
            logger.info(f" licensePlateText: {detection_dict.get('licensePlateText')} ← UPDATED")
            logger.info(f" licensePlateConfidence: {detection_dict.get('licensePlateConfidence')}")
        else:
            # Cached entry does not have the expected data.detection nesting;
            # nothing safe to update, so abort.
            logger.error("❌ INVALID DETECTION DATA STRUCTURE!")
            logger.error(f" Session {session_id} has malformed detection data")
            logger.error(f" Detection data keys: {list(detection_data.keys())}")
            if 'data' in detection_data:
                logger.error(f" Data keys: {list(detection_data['data'].keys())}")
            return
        # Update timestamp to indicate this is an LPR update
        detection_data['timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        # Update all caches with new data
        session_detections[session_id] = detection_data.copy()
        cached_detections[camera_id] = detection_data.copy()
        # CRITICAL: Also update the pipeline state cached detection dict (used by lightweight mode)
        if camera_id in session_pipeline_states:
            pipeline_state = session_pipeline_states[camera_id]
            current_cached_dict = pipeline_state.get("cached_detection_dict", {})
            # Update the pipeline cached detection dict with new license plate
            updated_dict = current_cached_dict.copy() if current_cached_dict else {}
            updated_dict['licensePlateText'] = license_text
            pipeline_state["cached_detection_dict"] = updated_dict
            logger.info(f"✅ LPR: Updated pipeline state cached_detection_dict for camera {camera_id}")
            logger.debug(f"🔍 Pipeline cached dict now: {updated_dict}")
        else:
            logger.warning(f"⚠️ Camera {camera_id} not found in session_pipeline_states - pipeline cache not updated")
        logger.info("📡 SENDING UPDATED DETECTION TO BACKEND")
        logger.info(f" 📹 Camera ID: {camera_id}")
        logger.info(f" 📨 Updated licensePlateText: '{license_text}'")
        logger.info(" 🔄 Updated both cache systems:")
        logger.info(f" 1⃣ cached_detections[{camera_id}] ✅")
        logger.info(f" 2⃣ session_pipeline_states[{camera_id}].cached_detection_dict ✅")
        # Log the full message being sent
        logger.info("📋 Updated detection data in cache:")
        logger.info(json.dumps(detection_data, indent=2))
        logger.info("✅ ALL CACHES UPDATED!")
        logger.info(f" 🎯 Lightweight mode will now use updated licensePlateText")
        logger.info(f" 📤 Backend will receive: licensePlateText = '{license_text}'")
        logger.info(" 🔄 Both cache systems synchronized with LPR data")
        logger.info("=" * 60)
        logger.info("🏁 LPR PROCESSING COMPLETE")
        logger.info(f" Session: {session_id}")
        logger.info(f" License: '{license_text}'")
        logger.info(f" Status: ✅ SUCCESS - DETECTION CACHE UPDATED")
        logger.info("=" * 60)
    except Exception as e:
        logger.error("=" * 60)
        logger.error("❌ LPR PROCESSING FAILED")
        logger.error("=" * 60)
        logger.error(f"Error: {e}")
        import traceback
        logger.error(f"Traceback: {traceback.format_exc()}")
        logger.error("=" * 60)
# LPR integration now uses cached detection mechanism instead of direct WebSocket sending
def license_results_listener():
    """Background thread to listen for LPR results from Redis.

    Runs forever: (re)connects to Redis, subscribes to the
    'license_results' pub/sub channel and forwards each JSON payload to
    process_license_result(). On any error the pub/sub connection is
    closed and the outer loop reconnects after a short back-off.
    """
    logger.info("🎧 Starting LPR listener thread...")
    while True:
        try:
            redis_client = create_redis_connection()
            if not redis_client:
                logger.error("❌ No Redis connection available for LPR listener")
                time.sleep(10)
                continue
            pubsub = redis_client.pubsub()
            try:
                pubsub.subscribe("license_results")
                logger.info("✅ LPR listener subscribed to 'license_results' channel")
                for message in pubsub.listen():
                    try:
                        if message['type'] == 'message':
                            logger.info("🔔 REDIS MESSAGE RECEIVED!")
                            logger.info(f" 📡 Channel: {message['channel']}")
                            logger.info(f" 📥 Raw data: {message['data']}")
                            logger.info(f" 📏 Data size: {len(str(message['data']))} bytes")
                            try:
                                lpr_data = json.loads(message['data'])
                                logger.info("✅ JSON parsing successful")
                                logger.info("🏁 Starting LPR processing...")
                                process_license_result(lpr_data)
                                logger.info("✅ LPR processing completed")
                            except json.JSONDecodeError as e:
                                logger.error("❌ JSON PARSING FAILED!")
                                logger.error(f" Error: {e}")
                                logger.error(f" Raw data: {message['data']}")
                                logger.error(f" Data type: {type(message['data'])}")
                            except Exception as e:
                                logger.error("❌ LPR PROCESSING ERROR!")
                                logger.error(f" Error: {e}")
                                import traceback
                                logger.error(f" Traceback: {traceback.format_exc()}")
                        elif message['type'] == 'subscribe':
                            logger.info(f"📡 LPR listener subscribed to channel: {message['channel']}")
                            logger.info("🎧 Ready to receive license plate results...")
                        elif message['type'] == 'unsubscribe':
                            logger.warning(f"📡 LPR listener unsubscribed from channel: {message['channel']}")
                        else:
                            logger.debug(f"📡 Redis message type: {message['type']}")
                    except Exception as e:
                        # Break back to the outer loop to rebuild the connection.
                        logger.error(f"❌ Error in LPR message processing loop: {e}")
                        break
            finally:
                # Always release the pub/sub connection before reconnecting,
                # otherwise every reconnect cycle leaks a subscribed connection.
                try:
                    pubsub.close()
                except Exception:
                    pass
        except redis.exceptions.ConnectionError as e:
            logger.error(f"❌ Redis connection lost in LPR listener: {e}")
            time.sleep(5) # Wait before reconnecting
        except Exception as e:
            logger.error(f"❌ Unexpected error in LPR listener: {e}")
            time.sleep(10)
    logger.warning("🛑 LPR listener thread stopped")  # NOTE(review): unreachable - the while True never exits
def cleanup_expired_sessions():
    """Remove sessions older than TTL (10 minutes)"""
    try:
        ttl_seconds = 600  # 10 minutes
        now = time.time()
        # Collect stale session ids first so we never mutate while iterating.
        expired = []
        for sid, stamp in detection_timestamps.items():
            if now - stamp > ttl_seconds:
                expired.append(sid)
        if not expired:
            logger.debug(f"🧹 No expired sessions to clean up ({len(detection_timestamps)} active)")
            return
        logger.info(f"🧹 Cleaning up {len(expired)} expired sessions")
        for sid in expired:
            # Drop the session from all three LPR caches.
            session_detections.pop(sid, None)
            cam = session_to_camera.pop(sid, None)
            detection_timestamps.pop(sid, None)
            logger.debug(f"Cleaned up expired session: {sid} (camera: {cam})")
    except Exception as e:
        logger.error(f"❌ Error in session cleanup: {e}")
def cleanup_timer():
    """Background thread for periodic session cleanup"""
    logger.info("⏰ Starting session cleanup timer thread...")
    interval = 120  # Run cleanup every 2 minutes
    while True:
        try:
            time.sleep(interval)
            cleanup_expired_sessions()
        except Exception as e:
            # Keep the timer alive on any failure; retry after the usual delay.
            logger.error(f"❌ Error in cleanup timer: {e}")
            time.sleep(interval)
def start_lpr_integration():
    """Start LPR integration threads"""
    global lpr_listener_thread, cleanup_timer_thread
    # Listener thread: consumes license plate results from Redis pub/sub.
    listener = threading.Thread(target=license_results_listener, daemon=True, name="LPR-Listener")
    listener.start()
    lpr_listener_thread = listener
    logger.info("✅ LPR listener thread started")
    # Janitor thread: periodically expires stale cached sessions.
    janitor = threading.Thread(target=cleanup_timer, daemon=True, name="Session-Cleanup")
    janitor.start()
    cleanup_timer_thread = janitor
    logger.info("✅ Session cleanup timer thread started")
WORKER_TIMEOUT_MS = 10000  # worker timeout, in milliseconds
logger.debug(f"Heartbeat interval set to {HEARTBEAT_INTERVAL} seconds")
@@ -335,6 +669,37 @@ def update_session_pipeline_mode(camera_id, new_mode, session_id=None):
####################################################
# REST API endpoint for image retrieval
####################################################
@app.get("/lpr/debug")
async def get_lpr_debug_info():
"""Debug endpoint to inspect LPR integration state"""
try:
return {
"status": "success",
"lpr_integration_started": lpr_integration_started,
"redis_connected": redis_client_global is not None and redis_client_global.ping() if redis_client_global else False,
"active_sessions": len(session_detections),
"session_details": {
session_id: {
"camera_id": session_to_camera.get(session_id, "unknown"),
"timestamp": detection_timestamps.get(session_id, 0),
"age_seconds": time.time() - detection_timestamps.get(session_id, time.time()),
"has_license": session_detections[session_id].get('data', {}).get('detection', {}).get('licensePlateText') is not None
}
for session_id in session_detections.keys()
},
"thread_status": {
"lpr_listener_alive": lpr_listener_thread.is_alive() if lpr_listener_thread else False,
"cleanup_timer_alive": cleanup_timer_thread.is_alive() if cleanup_timer_thread else False
},
"cached_detections_by_camera": list(cached_detections.keys())
}
except Exception as e:
return {
"status": "error",
"error": str(e),
"lpr_integration_started": lpr_integration_started
}
@app.get("/camera/{camera_id}/image")
async def get_camera_image(camera_id: str):
"""
@@ -889,6 +1254,15 @@ async def detect(websocket: WebSocket):
if detection_dict is not None and detection_result.get("class") != "none":
cached_detections[camera_id] = detection_data.copy()
logger.debug(f"Cached detection for camera {camera_id}: {detection_dict}")
# Enhanced caching: Store by session_id for LPR integration
session_id = detection_data.get('sessionId')
if session_id:
session_id_str = str(session_id)
session_detections[session_id_str] = detection_data.copy()
session_to_camera[session_id_str] = camera_id
detection_timestamps[session_id_str] = time.time()
logger.debug(f"🔑 Cached detection for LPR by session_id {session_id_str}: {camera_id}")
else:
# Don't cache null/none detections - let them reset properly
cached_detections.pop(camera_id, None)
@@ -1258,6 +1632,16 @@ async def detect(websocket: WebSocket):
if camera_id not in models:
models[camera_id] = {}
models[camera_id][modelId] = model_tree
# Start LPR integration threads after first model is loaded (only once)
global lpr_integration_started
if not lpr_integration_started and hasattr(model_tree, 'get') and model_tree.get('redis_client'):
try:
start_lpr_integration()
lpr_integration_started = True
logger.info("🚀 LPR integration started after first model load")
except Exception as e:
logger.error(f"❌ Failed to start LPR integration: {e}")
# Create stream (same logic as original)
if camera_id and (rtsp_url or snapshot_url) and len(streams) < max_streams:
@@ -1572,6 +1956,15 @@ async def detect(websocket: WebSocket):
models[camera_id][modelId] = model_tree
logger.info(f"Successfully loaded model {modelId} for camera {camera_id}")
logger.debug(f"Model extraction directory: {extraction_dir}")
# Start LPR integration threads after first model is loaded (only once)
if not lpr_integration_started and hasattr(model_tree, 'get') and model_tree.get('redis_client'):
try:
start_lpr_integration()
lpr_integration_started = True
logger.info("🚀 LPR integration started after first model load")
except Exception as e:
logger.error(f"❌ Failed to start LPR integration: {e}")
if camera_id and (rtsp_url or snapshot_url):
with streams_lock:
# Determine camera URL for shared stream management
@@ -1798,6 +2191,47 @@ async def detect(websocket: WebSocket):
logger.debug(f"Session IDs after update: {session_ids}")
logger.debug(f"🎯 CMS Backend created sessionId {session_id} after receiving detection data")
# 🔑 LPR Integration: Retroactively cache the last detection by this new session_id
session_id_str = str(session_id)
logger.info(f"🔑 LPR: Attempting to retroactively cache detection for session_id {session_id_str}")
# Find cameras associated with this display
display_cameras = []
with streams_lock:
for camera_id, stream in streams.items():
if stream["subscriptionIdentifier"].startswith(display_identifier + ";"):
display_cameras.append(camera_id)
logger.debug(f"🔍 Found {len(display_cameras)} cameras for display {display_identifier}: {display_cameras}")
# Cache the most recent detection for each camera by the new session_id
cached_count = 0
for camera_id in display_cameras:
if camera_id in cached_detections:
detection_data = cached_detections[camera_id].copy()
# Add sessionId to the detection data
detection_data['sessionId'] = session_id
# Cache by session_id for LPR lookup
session_detections[session_id_str] = detection_data
session_to_camera[session_id_str] = camera_id
detection_timestamps[session_id_str] = time.time()
cached_count += 1
logger.info(f"✅ LPR: Cached detection for session_id {session_id_str} -> camera {camera_id}")
logger.debug(f"🔍 Detection data: {detection_data.get('data', {}).get('detection', {})}")
else:
logger.debug(f"⚠️ No cached detection available for camera {camera_id}")
if cached_count > 0:
logger.info(f"🎉 LPR: Successfully cached {cached_count} detection(s) for session_id {session_id_str}")
logger.info(f"📊 Total LPR sessions now cached: {len(session_detections)}")
else:
logger.warning(f"⚠️ LPR: No detections could be cached for session_id {session_id_str}")
logger.warning(f" Display cameras: {display_cameras}")
logger.warning(f" Available cached detections: {list(cached_detections.keys())}")
# Clear waiting state for cameras associated with this display
with streams_lock:
affected_cameras = []
@@ -1881,4 +2315,8 @@ async def detect(websocket: WebSocket):
cached_full_pipeline_results.clear()
session_pipeline_states.clear()
session_ids.clear()
# Clean up LPR integration caches
session_detections.clear()
session_to_camera.clear()
detection_timestamps.clear()
logger.info("WebSocket connection closed")