dev #1

Merged
chawanwit.p merged 7 commits from dev into main 2025-08-10 11:07:37 +00:00
4 changed files with 845 additions and 104 deletions

108
app.py
View file

@@ -35,6 +35,8 @@ session_ids: Dict[str, int] = {}
camera_streams: Dict[str, Dict[str, Any]] = {}
# Map subscriptions to their camera URL
subscription_to_camera: Dict[str, str] = {}
# Store latest frames for REST API access (separate from processing buffer)
latest_frames: Dict[str, Any] = {}
with open("config.json", "r") as f:
config = json.load(f)
@@ -109,20 +111,60 @@ def download_mpta(url: str, dest_path: str) -> str:
# Add helper to fetch snapshot image from HTTP/HTTPS URL
def fetch_snapshot(url: str):
try:
response = requests.get(url, timeout=10)
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
# Parse URL to extract credentials
parsed = urlparse(url)
# Prepare headers - some cameras require User-Agent
headers = {
'User-Agent': 'Mozilla/5.0 (compatible; DetectorWorker/1.0)'
}
# Reconstruct URL without credentials
clean_url = f"{parsed.scheme}://{parsed.hostname}"
if parsed.port:
clean_url += f":{parsed.port}"
clean_url += parsed.path
if parsed.query:
clean_url += f"?{parsed.query}"
auth = None
if parsed.username and parsed.password:
# Try HTTP Digest authentication first (common for IP cameras)
try:
auth = HTTPDigestAuth(parsed.username, parsed.password)
response = requests.get(clean_url, auth=auth, headers=headers, timeout=10)
if response.status_code == 200:
logger.debug(f"Successfully authenticated using HTTP Digest for {clean_url}")
elif response.status_code == 401:
# If Digest fails, try Basic auth
logger.debug(f"HTTP Digest failed, trying Basic auth for {clean_url}")
auth = HTTPBasicAuth(parsed.username, parsed.password)
response = requests.get(clean_url, auth=auth, headers=headers, timeout=10)
if response.status_code == 200:
logger.debug(f"Successfully authenticated using HTTP Basic for {clean_url}")
except Exception as auth_error:
logger.debug(f"Authentication setup error: {auth_error}")
# Fallback to original URL with embedded credentials
response = requests.get(url, headers=headers, timeout=10)
else:
# No credentials in URL, make request as-is
response = requests.get(url, headers=headers, timeout=10)
if response.status_code == 200:
# Convert response content to numpy array
nparr = np.frombuffer(response.content, np.uint8)
# Decode image
frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
if frame is not None:
logger.debug(f"Successfully fetched snapshot from {url}, shape: {frame.shape}")
logger.debug(f"Successfully fetched snapshot from {clean_url}, shape: {frame.shape}")
return frame
else:
logger.error(f"Failed to decode image from snapshot URL: {url}")
logger.error(f"Failed to decode image from snapshot URL: {clean_url}")
return None
else:
logger.error(f"Failed to fetch snapshot (status code {response.status_code}): {url}")
logger.error(f"Failed to fetch snapshot (status code {response.status_code}): {clean_url}")
return None
except Exception as e:
logger.error(f"Exception fetching snapshot from {url}: {str(e)}")
@@ -146,26 +188,24 @@ async def get_camera_image(camera_id: str):
Get the current frame from a camera as JPEG image
"""
try:
# URL decode the camera_id to handle encoded characters like %3B for semicolon
from urllib.parse import unquote
original_camera_id = camera_id
camera_id = unquote(camera_id)
logger.debug(f"REST API request: original='{original_camera_id}', decoded='{camera_id}'")
with streams_lock:
if camera_id not in streams:
logger.warning(f"Camera ID '{camera_id}' not found in streams. Current streams: {list(streams.keys())}")
raise HTTPException(status_code=404, detail=f"Camera {camera_id} not found or not active")
stream = streams[camera_id]
buffer = stream["buffer"]
logger.debug(f"Camera '{camera_id}' buffer size: {buffer.qsize()}, buffer empty: {buffer.empty()}")
logger.debug(f"Buffer queue contents: {getattr(buffer, 'queue', None)}")
if buffer.empty():
logger.warning(f"No frame available for camera '{camera_id}'. Buffer is empty.")
# Check if we have a cached frame for this camera
if camera_id not in latest_frames:
logger.warning(f"No cached frame available for camera '{camera_id}'.")
raise HTTPException(status_code=404, detail=f"No frame available for camera {camera_id}")
# Get the latest frame (non-blocking)
try:
frame = buffer.queue[-1] # Get the most recent frame without removing it
except IndexError:
logger.warning(f"Buffer queue is empty for camera '{camera_id}' when trying to access last frame.")
raise HTTPException(status_code=404, detail=f"No frame available for camera {camera_id}")
frame = latest_frames[camera_id]
logger.debug(f"Retrieved cached frame for camera '{camera_id}', frame shape: {frame.shape}")
# Encode frame as JPEG
success, buffer_img = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 85])
if not success:
@@ -199,7 +239,20 @@ async def detect(websocket: WebSocket):
logger.debug(f"Processing frame for camera {camera_id} with model {stream['modelId']}")
start_time = time.time()
detection_result = run_pipeline(cropped_frame, model_tree)
# Extract display identifier for session ID lookup
subscription_parts = stream["subscriptionIdentifier"].split(';')
display_identifier = subscription_parts[0] if subscription_parts else None
session_id = session_ids.get(display_identifier) if display_identifier else None
# Create context for pipeline execution
pipeline_context = {
"camera_id": camera_id,
"display_id": display_identifier,
"session_id": session_id
}
detection_result = run_pipeline(cropped_frame, model_tree, context=pipeline_context)
process_time = (time.time() - start_time) * 1000
logger.debug(f"Detection for camera {camera_id} completed in {process_time:.2f}ms")
@@ -258,11 +311,6 @@ async def detect(websocket: WebSocket):
if key not in ["box", "id"]: # Skip internal fields
detection_dict[key] = value
# Extract display identifier for session ID lookup
subscription_parts = stream["subscriptionIdentifier"].split(';')
display_identifier = subscription_parts[0] if subscription_parts else None
session_id = session_ids.get(display_identifier) if display_identifier else None
detection_data = {
"type": "imageDetection",
"subscriptionIdentifier": stream["subscriptionIdentifier"],
@@ -282,9 +330,6 @@ async def detect(websocket: WebSocket):
logger.info(f"Camera {camera_id}: Detected {highest_confidence_detection['class']} with confidence {highest_confidence_detection['confidence']:.2f} using model {stream['modelName']}")
# Log session ID if available
subscription_parts = stream["subscriptionIdentifier"].split(';')
display_identifier = subscription_parts[0] if subscription_parts else None
session_id = session_ids.get(display_identifier) if display_identifier else None
if session_id:
logger.debug(f"Detection associated with session ID: {session_id}")
@@ -476,6 +521,10 @@ async def detect(websocket: WebSocket):
logger.debug(f"Got frame from buffer for camera {camera_id}")
frame = buffer.get()
# Cache the frame for REST API access
latest_frames[camera_id] = frame.copy()
logger.debug(f"Cached frame for REST API access for camera {camera_id}")
with models_lock:
model_tree = models.get(camera_id, {}).get(stream["modelId"])
if not model_tree:
@@ -647,7 +696,7 @@ async def detect(websocket: WebSocket):
if snapshot_url and snapshot_interval:
logger.info(f"Creating new snapshot stream for camera {camera_id}: {snapshot_url}")
thread = threading.Thread(target=snapshot_reader, args=(camera_identifier, snapshot_url, snapshot_interval, buffer, stop_event))
thread = threading.Thread(target=snapshot_reader, args=(camera_id, snapshot_url, snapshot_interval, buffer, stop_event))
thread.daemon = True
thread.start()
mode = "snapshot"
@@ -670,7 +719,7 @@ async def detect(websocket: WebSocket):
if not cap.isOpened():
logger.error(f"Failed to open RTSP stream for camera {camera_id}")
continue
thread = threading.Thread(target=frame_reader, args=(camera_identifier, cap, buffer, stop_event))
thread = threading.Thread(target=frame_reader, args=(camera_id, cap, buffer, stop_event))
thread.daemon = True
thread.start()
mode = "rtsp"
@@ -744,6 +793,8 @@ async def detect(websocket: WebSocket):
else:
logger.info(f"Shared stream for {camera_url} still has {shared_stream['ref_count']} references")
# Clean up cached frame
latest_frames.pop(camera_id, None)
logger.info(f"Unsubscribed from camera {camera_id}")
# Note: Keep models in memory for potential reuse
elif msg_type == "requestState":
@@ -847,5 +898,6 @@ async def detect(websocket: WebSocket):
subscription_to_camera.clear()
with models_lock:
models.clear()
latest_frames.clear()
session_ids.clear()
logger.info("WebSocket connection closed")

4
requirements.txt
View file

@@ -7,3 +7,7 @@ opencv-python
websockets
fastapi[standard]
redis
urllib3<2.0.0
psycopg2-binary
scipy
filterpy

211
siwatsystem/database.py Normal file
View file

@@ -0,0 +1,211 @@
import psycopg2
import psycopg2.extras
from typing import Optional, Dict, Any
import logging
import uuid
logger = logging.getLogger(__name__)
class DatabaseManager:
def __init__(self, config: Dict[str, Any]):
self.config = config
self.connection: Optional[psycopg2.extensions.connection] = None
def connect(self) -> bool:
try:
self.connection = psycopg2.connect(
host=self.config['host'],
port=self.config['port'],
database=self.config['database'],
user=self.config['username'],
password=self.config['password']
)
logger.info("PostgreSQL connection established successfully")
return True
except Exception as e:
logger.error(f"Failed to connect to PostgreSQL: {e}")
return False
def disconnect(self):
if self.connection:
self.connection.close()
self.connection = None
logger.info("PostgreSQL connection closed")
def is_connected(self) -> bool:
try:
if self.connection and not self.connection.closed:
cur = self.connection.cursor()
cur.execute("SELECT 1")
cur.fetchone()
cur.close()
return True
except Exception:
pass
return False
def update_car_info(self, session_id: str, brand: str, model: str, body_type: str) -> bool:
if not self.is_connected():
if not self.connect():
return False
try:
cur = self.connection.cursor()
query = """
INSERT INTO car_frontal_info (session_id, car_brand, car_model, car_body_type, updated_at)
VALUES (%s, %s, %s, %s, NOW())
ON CONFLICT (session_id)
DO UPDATE SET
car_brand = EXCLUDED.car_brand,
car_model = EXCLUDED.car_model,
car_body_type = EXCLUDED.car_body_type,
updated_at = NOW()
"""
cur.execute(query, (session_id, brand, model, body_type))
self.connection.commit()
cur.close()
logger.info(f"Updated car info for session {session_id}: {brand} {model} ({body_type})")
return True
except Exception as e:
logger.error(f"Failed to update car info: {e}")
if self.connection:
self.connection.rollback()
return False
def execute_update(self, table: str, key_field: str, key_value: str, fields: Dict[str, str]) -> bool:
if not self.is_connected():
if not self.connect():
return False
try:
cur = self.connection.cursor()
# Build the UPDATE query dynamically
set_clauses = []
values = []
for field, value in fields.items():
if value == "NOW()":
set_clauses.append(f"{field} = NOW()")
else:
set_clauses.append(f"{field} = %s")
values.append(value)
# Add schema prefix if table doesn't already have it
full_table_name = table if '.' in table else f"gas_station_1.{table}"
query = f"""
INSERT INTO {full_table_name} ({key_field}, {', '.join(fields.keys())})
VALUES (%s, {', '.join(['%s'] * len(fields))})
ON CONFLICT ({key_field})
DO UPDATE SET {', '.join(set_clauses)}
"""
# Add key_value to the beginning of values list
all_values = [key_value] + list(fields.values()) + values
cur.execute(query, all_values)
self.connection.commit()
cur.close()
logger.info(f"Updated {table} for {key_field}={key_value}")
return True
except Exception as e:
logger.error(f"Failed to execute update on {table}: {e}")
if self.connection:
self.connection.rollback()
return False
def create_car_frontal_info_table(self) -> bool:
"""Create the car_frontal_info table in gas_station_1 schema if it doesn't exist."""
if not self.is_connected():
if not self.connect():
return False
try:
cur = self.connection.cursor()
# Create schema if it doesn't exist
cur.execute("CREATE SCHEMA IF NOT EXISTS gas_station_1")
# Create table if it doesn't exist
create_table_query = """
CREATE TABLE IF NOT EXISTS gas_station_1.car_frontal_info (
display_id VARCHAR(255),
captured_timestamp VARCHAR(255),
session_id VARCHAR(255) PRIMARY KEY,
license_character VARCHAR(255) DEFAULT NULL,
license_type VARCHAR(255) DEFAULT 'No model available',
car_brand VARCHAR(255) DEFAULT NULL,
car_model VARCHAR(255) DEFAULT NULL,
car_body_type VARCHAR(255) DEFAULT NULL,
updated_at TIMESTAMP DEFAULT NOW()
)
"""
cur.execute(create_table_query)
# Add columns if they don't exist (for existing tables)
alter_queries = [
"ALTER TABLE gas_station_1.car_frontal_info ADD COLUMN IF NOT EXISTS car_brand VARCHAR(255) DEFAULT NULL",
"ALTER TABLE gas_station_1.car_frontal_info ADD COLUMN IF NOT EXISTS car_model VARCHAR(255) DEFAULT NULL",
"ALTER TABLE gas_station_1.car_frontal_info ADD COLUMN IF NOT EXISTS car_body_type VARCHAR(255) DEFAULT NULL",
"ALTER TABLE gas_station_1.car_frontal_info ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT NOW()"
]
for alter_query in alter_queries:
try:
cur.execute(alter_query)
logger.debug(f"Executed: {alter_query}")
except Exception as e:
# Ignore errors if column already exists (for older PostgreSQL versions)
if "already exists" in str(e).lower():
logger.debug(f"Column already exists, skipping: {alter_query}")
else:
logger.warning(f"Error in ALTER TABLE: {e}")
self.connection.commit()
cur.close()
logger.info("Successfully created/verified car_frontal_info table with all required columns")
return True
except Exception as e:
logger.error(f"Failed to create car_frontal_info table: {e}")
if self.connection:
self.connection.rollback()
return False
def insert_initial_detection(self, display_id: str, captured_timestamp: str, session_id: Optional[str] = None) -> Optional[str]:
"""Insert initial detection record and return the session_id."""
if not self.is_connected():
if not self.connect():
return None
# Generate session_id if not provided
if not session_id:
session_id = str(uuid.uuid4())
try:
# Ensure table exists
if not self.create_car_frontal_info_table():
logger.error("Failed to create/verify table before insertion")
return None
cur = self.connection.cursor()
insert_query = """
INSERT INTO gas_station_1.car_frontal_info
(display_id, captured_timestamp, session_id, license_character, license_type, car_brand, car_model, car_body_type)
VALUES (%s, %s, %s, NULL, 'No model available', NULL, NULL, NULL)
ON CONFLICT (session_id) DO NOTHING
"""
cur.execute(insert_query, (display_id, captured_timestamp, session_id))
self.connection.commit()
cur.close()
logger.info(f"Inserted initial detection record with session_id: {session_id}")
return session_id
except Exception as e:
logger.error(f"Failed to insert initial detection record: {e}")
if self.connection:
self.connection.rollback()
return None
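A minimal usage sketch for DatabaseManager; the connection settings and car attributes below are placeholders, not values from this repository:

from siwatsystem.database import DatabaseManager

db = DatabaseManager({
    "host": "localhost",
    "port": 5432,
    "database": "detector",
    "username": "worker",
    "password": "secret",
})
if db.connect():
    # Creates gas_station_1.car_frontal_info if needed, then inserts a stub row.
    session_id = db.insert_initial_detection("display-001", "2025-08-10T11-07-37")
    if session_id:
        db.update_car_info(session_id, "Toyota", "Corolla", "Sedan")
    db.disconnect()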

626
siwatsystem/pympta.py
View file

@@ -3,20 +3,72 @@ import json
import logging
import torch
import cv2
import requests
import zipfile
import shutil
import traceback
import redis
import time
import uuid
import concurrent.futures
from ultralytics import YOLO
from urllib.parse import urlparse
from .database import DatabaseManager
# Create a logger specifically for this module
logger = logging.getLogger("detector_worker.pympta")
def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client) -> dict:
def validate_redis_config(redis_config: dict) -> bool:
"""Validate Redis configuration parameters."""
required_fields = ["host", "port"]
for field in required_fields:
if field not in redis_config:
logger.error(f"Missing required Redis config field: {field}")
return False
if not isinstance(redis_config["port"], int) or redis_config["port"] <= 0:
logger.error(f"Invalid Redis port: {redis_config['port']}")
return False
return True
def validate_postgresql_config(pg_config: dict) -> bool:
"""Validate PostgreSQL configuration parameters."""
required_fields = ["host", "port", "database", "username", "password"]
for field in required_fields:
if field not in pg_config:
logger.error(f"Missing required PostgreSQL config field: {field}")
return False
if not isinstance(pg_config["port"], int) or pg_config["port"] <= 0:
logger.error(f"Invalid PostgreSQL port: {pg_config['port']}")
return False
return True
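For reference, configuration dicts that satisfy the two validators above (all values are placeholders):

redis_config = {"host": "127.0.0.1", "port": 6379}
pg_config = {
    "host": "127.0.0.1",
    "port": 5432,
    "database": "detector",
    "username": "worker",
    "password": "secret",
}
assert validate_redis_config(redis_config)
assert validate_postgresql_config(pg_config)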
def crop_region_by_class(frame, regions_dict, class_name):
"""Crop a specific region from frame based on detected class."""
if class_name not in regions_dict:
logger.warning(f"Class '{class_name}' not found in detected regions")
return None
bbox = regions_dict[class_name]['bbox']
x1, y1, x2, y2 = bbox
cropped = frame[y1:y2, x1:x2]
if cropped.size == 0:
logger.warning(f"Empty crop for class '{class_name}' with bbox {bbox}")
return None
return cropped
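The regions_dict consumed here maps each detected class to its best bounding box; an illustrative call (the frame and values are stand-ins):

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in frame
regions_dict = {
    "Frontal": {
        "bbox": (120, 90, 480, 300),
        "confidence": 0.88,
        "detection": {"class": "Frontal", "confidence": 0.88, "id": None},
    }
}
crop = crop_region_by_class(frame, regions_dict, "Frontal")  # frame[90:300, 120:480]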
def format_action_context(base_context, additional_context=None):
"""Format action context with dynamic values."""
context = {**base_context}
if additional_context:
context.update(additional_context)
return context
def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manager=None) -> dict:
# Recursively load a model node from configuration.
model_path = os.path.join(mpta_dir, node_config["modelFile"])
if not os.path.exists(model_path):
@@ -46,16 +98,22 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client) -> dict:
"triggerClasses": trigger_classes,
"triggerClassIndices": trigger_class_indices,
"crop": node_config.get("crop", False),
"cropClass": node_config.get("cropClass"),
"minConfidence": node_config.get("minConfidence", None),
"multiClass": node_config.get("multiClass", False),
"expectedClasses": node_config.get("expectedClasses", []),
"parallel": node_config.get("parallel", False),
"actions": node_config.get("actions", []),
"parallelActions": node_config.get("parallelActions", []),
"model": model,
"branches": [],
"redis_client": redis_client
"redis_client": redis_client,
"db_manager": db_manager
}
logger.debug(f"Configured node {node_config['modelId']} with trigger classes: {node['triggerClasses']}")
for child in node_config.get("branches", []):
logger.debug(f"Loading branch for parent node {node_config['modelId']}")
node["branches"].append(load_pipeline_node(child, mpta_dir, redis_client))
node["branches"].append(load_pipeline_node(child, mpta_dir, redis_client, db_manager))
return node
def load_pipeline_from_zip(zip_source: str, target_dir: str) -> dict:
@@ -168,6 +226,9 @@ def load_pipeline_from_zip(zip_source: str, target_dir: str) -> dict:
redis_client = None
if "redis" in pipeline_config:
redis_config = pipeline_config["redis"]
if not validate_redis_config(redis_config):
logger.error("Invalid Redis configuration, skipping Redis connection")
else:
try:
redis_client = redis.Redis(
host=redis_config["host"],
@@ -182,7 +243,25 @@ def load_pipeline_from_zip(zip_source: str, target_dir: str) -> dict:
logger.error(f"Failed to connect to Redis: {e}")
redis_client = None
return load_pipeline_node(pipeline_config["pipeline"], mpta_dir, redis_client)
# Establish PostgreSQL connection if configured
db_manager = None
if "postgresql" in pipeline_config:
pg_config = pipeline_config["postgresql"]
if not validate_postgresql_config(pg_config):
logger.error("Invalid PostgreSQL configuration, skipping database connection")
else:
try:
db_manager = DatabaseManager(pg_config)
if db_manager.connect():
logger.info(f"Successfully connected to PostgreSQL at {pg_config['host']}:{pg_config['port']}")
else:
logger.error("Failed to connect to PostgreSQL")
db_manager = None
except Exception as e:
logger.error(f"Error initializing PostgreSQL connection: {e}")
db_manager = None
return load_pipeline_node(pipeline_config["pipeline"], mpta_dir, redis_client, db_manager)
except json.JSONDecodeError as e:
logger.error(f"Error parsing pipeline.json: {str(e)}", exc_info=True)
return None
@@ -193,22 +272,53 @@ def load_pipeline_from_zip(zip_source: str, target_dir: str) -> dict:
logger.error(f"Error loading pipeline.json: {str(e)}", exc_info=True)
return None
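Putting the loader together, a sketch of the pipeline.json shape it expects, shown here as a Python dict; the model IDs, file names, and credentials are illustrative, not taken from a real .mpta bundle:

pipeline_config = {
    "redis": {"host": "127.0.0.1", "port": 6379},
    "postgresql": {
        "host": "127.0.0.1",
        "port": 5432,
        "database": "detector",
        "username": "worker",
        "password": "secret",
    },
    "pipeline": {
        "modelId": "car_frontal_detection_v1",
        "modelFile": "car_frontal_detection_v1.pt",
        "triggerClasses": ["Car", "Frontal"],
        "minConfidence": 0.5,
        "multiClass": True,
        "expectedClasses": ["Car", "Frontal"],
        "actions": [],
        "parallelActions": [],
        "branches": [
            {
                "modelId": "car_brand_cls_v1",
                "modelFile": "car_brand_cls_v1.pt",
                "triggerClasses": ["Frontal"],
                "minConfidence": 0.85,
                "crop": True,
                "cropClass": "Frontal",
                "parallel": True,
                "branches": [],
            }
        ],
    },
}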
def execute_actions(node, frame, detection_result):
def execute_actions(node, frame, detection_result, regions_dict=None):
if not node["redis_client"] or not node["actions"]:
return
# Create a dynamic context for this detection event
from datetime import datetime
action_context = {
**detection_result,
"timestamp_ms": int(time.time() * 1000),
"uuid": str(uuid.uuid4()),
"timestamp": datetime.now().strftime("%Y-%m-%dT%H-%M-%S"),
"filename": f"{uuid.uuid4()}.jpg"
}
for action in node["actions"]:
try:
if action["type"] == "redis_save_image":
key = action["key"].format(**action_context)
_, buffer = cv2.imencode('.jpg', frame)
# Check if we need to crop a specific region
region_name = action.get("region")
image_to_save = frame
if region_name and regions_dict:
cropped_image = crop_region_by_class(frame, regions_dict, region_name)
if cropped_image is not None:
image_to_save = cropped_image
logger.debug(f"Cropped region '{region_name}' for redis_save_image")
else:
logger.warning(f"Could not crop region '{region_name}', saving full frame instead")
# Encode image with specified format and quality (default to JPEG)
img_format = action.get("format", "jpeg").lower()
quality = action.get("quality", 90)
if img_format == "jpeg":
encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
success, buffer = cv2.imencode('.jpg', image_to_save, encode_params)
elif img_format == "png":
success, buffer = cv2.imencode('.png', image_to_save)
else:
success, buffer = cv2.imencode('.jpg', image_to_save, [cv2.IMWRITE_JPEG_QUALITY, quality])
if not success:
logger.error(f"Failed to encode image for redis_save_image")
continue
expire_seconds = action.get("expire_seconds")
if expire_seconds:
node["redis_client"].setex(key, expire_seconds, buffer.tobytes())
@@ -216,60 +326,244 @@ def execute_actions(node, frame, detection_result):
else:
node["redis_client"].set(key, buffer.tobytes())
logger.info(f"Saved image to Redis with key: {key}")
# Add the generated key to the context for subsequent actions
action_context["image_key"] = key
elif action["type"] == "redis_publish":
channel = action["channel"]
message = action["message"].format(**action_context)
node["redis_client"].publish(channel, message)
try:
# Handle JSON message format by creating it programmatically
message_template = action["message"]
# Check if the message is JSON-like (starts and ends with braces)
if message_template.strip().startswith('{') and message_template.strip().endswith('}'):
# Create JSON data programmatically to avoid formatting issues
json_data = {}
# Add common fields
json_data["event"] = "frontal_detected"
json_data["display_id"] = action_context.get("display_id", "unknown")
json_data["session_id"] = action_context.get("session_id")
json_data["timestamp"] = action_context.get("timestamp", "")
json_data["image_key"] = action_context.get("image_key", "")
# Convert to JSON string
message = json.dumps(json_data)
else:
# Use regular string formatting for non-JSON messages
message = message_template.format(**action_context)
# Publish to Redis
if not node["redis_client"]:
logger.error("Redis client is None, cannot publish message")
continue
# Test Redis connection
try:
node["redis_client"].ping()
logger.debug("Redis connection is active")
except Exception as ping_error:
logger.error(f"Redis connection test failed: {ping_error}")
continue
result = node["redis_client"].publish(channel, message)
logger.info(f"Published message to Redis channel '{channel}': {message}")
logger.info(f"Redis publish result (subscribers count): {result}")
# Additional debug info
if result == 0:
logger.warning(f"No subscribers listening to channel '{channel}'")
else:
logger.info(f"Message delivered to {result} subscriber(s)")
except KeyError as e:
logger.error(f"Missing key in redis_publish message template: {e}")
logger.debug(f"Available context keys: {list(action_context.keys())}")
except Exception as e:
logger.error(f"Error in redis_publish action: {e}")
logger.debug(f"Message template: {action['message']}")
logger.debug(f"Available context keys: {list(action_context.keys())}")
import traceback
logger.debug(f"Full traceback: {traceback.format_exc()}")
except Exception as e:
logger.error(f"Error executing action {action['type']}: {e}")
def run_pipeline(frame, node: dict, return_bbox: bool=False):
def execute_parallel_actions(node, frame, detection_result, regions_dict):
"""Execute parallel actions after all required branches have completed."""
if not node.get("parallelActions"):
return
logger.debug("Executing parallel actions...")
branch_results = detection_result.get("branch_results", {})
for action in node["parallelActions"]:
try:
action_type = action.get("type")
logger.debug(f"Processing parallel action: {action_type}")
if action_type == "postgresql_update_combined":
# Check if all required branches have completed
wait_for_branches = action.get("waitForBranches", [])
missing_branches = [branch for branch in wait_for_branches if branch not in branch_results]
if missing_branches:
logger.warning(f"Cannot execute postgresql_update_combined: missing branch results for {missing_branches}")
continue
logger.info(f"All required branches completed: {wait_for_branches}")
# Execute the database update
execute_postgresql_update_combined(node, action, detection_result, branch_results)
else:
logger.warning(f"Unknown parallel action type: {action_type}")
except Exception as e:
logger.error(f"Error executing parallel action {action.get('type', 'unknown')}: {e}")
import traceback
logger.debug(f"Full traceback: {traceback.format_exc()}")
def execute_postgresql_update_combined(node, action, detection_result, branch_results):
"""Execute a PostgreSQL update with combined branch results."""
if not node.get("db_manager"):
logger.error("No database manager available for postgresql_update_combined action")
return
try:
table = action["table"]
key_field = action["key_field"]
key_value_template = action["key_value"]
fields = action["fields"]
# Create context for key value formatting
action_context = {**detection_result}
key_value = key_value_template.format(**action_context)
logger.info(f"Executing database update: table={table}, {key_field}={key_value}")
# Process field mappings
mapped_fields = {}
for db_field, value_template in fields.items():
try:
mapped_value = resolve_field_mapping(value_template, branch_results, action_context)
if mapped_value is not None:
mapped_fields[db_field] = mapped_value
logger.debug(f"Mapped field: {db_field} = {mapped_value}")
else:
logger.warning(f"Could not resolve field mapping for {db_field}: {value_template}")
except Exception as e:
logger.error(f"Error mapping field {db_field} with template '{value_template}': {e}")
if not mapped_fields:
logger.warning("No fields mapped successfully, skipping database update")
return
# Execute the database update
success = node["db_manager"].execute_update(table, key_field, key_value, mapped_fields)
if success:
logger.info(f"Successfully updated database: {table} with {len(mapped_fields)} fields")
else:
logger.error(f"Failed to update database: {table}")
except KeyError as e:
logger.error(f"Missing required field in postgresql_update_combined action: {e}")
except Exception as e:
logger.error(f"Error in postgresql_update_combined action: {e}")
import traceback
logger.debug(f"Full traceback: {traceback.format_exc()}")
def resolve_field_mapping(value_template, branch_results, action_context):
"""Resolve field mapping templates like {car_brand_cls_v1.brand}."""
try:
# Handle simple context variables first (non-branch references)
if '.' not in value_template:
return value_template.format(**action_context)
# Handle branch result references like {model_id.field}
import re
branch_refs = re.findall(r'\{([^}]+\.[^}]+)\}', value_template)
resolved_template = value_template
for ref in branch_refs:
try:
model_id, field_name = ref.split('.', 1)
if model_id in branch_results:
branch_data = branch_results[model_id]
if field_name in branch_data:
field_value = branch_data[field_name]
resolved_template = resolved_template.replace(f'{{{ref}}}', str(field_value))
logger.debug(f"Resolved {ref} to {field_value}")
else:
logger.warning(f"Field '{field_name}' not found in branch '{model_id}' results. Available fields: {list(branch_data.keys())}")
return None
else:
logger.warning(f"Branch '{model_id}' not found in results. Available branches: {list(branch_results.keys())}")
return None
except ValueError:
logger.error(f"Invalid branch reference format: {ref}")
return None
# Format any remaining simple variables
try:
final_value = resolved_template.format(**action_context)
return final_value
except KeyError as e:
logger.warning(f"Could not resolve context variable in template: {e}")
return resolved_template
except Exception as e:
logger.error(f"Error resolving field mapping '{value_template}': {e}")
return None
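Example resolution, assuming a completed brand-classification branch whose result carries the "brand" field set in the classification stage:

branch_results = {"car_brand_cls_v1": {"class": "Toyota", "confidence": 0.91, "brand": "Toyota"}}
value = resolve_field_mapping("{car_brand_cls_v1.brand}", branch_results, {})
# value == "Toyota"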
def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None):
"""
- For detection nodes (task != 'classify'):
runs `track(..., classes=triggerClassIndices)`
picks top box ≥ minConfidence
optionally crops & resizes → recurse into child
else returns (det_dict, bbox)
- For classify nodes:
runs `predict()`
returns top (class,confidence) and no bbox
Enhanced pipeline that supports:
- Multi-class detection (detecting multiple classes simultaneously)
- Parallel branch processing
- Region-based actions and cropping
- Context passing for session/camera information
"""
try:
task = getattr(node["model"], "task", None)
# ─── Classification stage ───────────────────────────────────
if task == "classify":
# run the classifier and grab its top-1 directly via the Probs API
results = node["model"].predict(frame, stream=False)
# nothing returned?
if not results:
return (None, None) if return_bbox else None
# take the first result's probs object
r = results[0]
probs = r.probs
if probs is None:
return (None, None) if return_bbox else None
# get the top-1 class index and its confidence
top1_idx = int(probs.top1)
top1_conf = float(probs.top1conf)
class_name = node["model"].names[top1_idx]
det = {
"class": node["model"].names[top1_idx],
"class": class_name,
"confidence": top1_conf,
"id": None
"id": None,
class_name: class_name # Add class name as key for backward compatibility
}
# Add specific field mappings for database operations based on model type
model_id = node.get("modelId", "").lower()
if "brand" in model_id or "brand_cls" in model_id:
det["brand"] = class_name
elif "bodytype" in model_id or "body" in model_id:
det["body_type"] = class_name
elif "color" in model_id:
det["color"] = class_name
execute_actions(node, frame, det)
return (det, None) if return_bbox else det
# ─── Detection stage ────────────────────────────────────────
# only look for your triggerClasses
# ─── Detection stage - Multi-class support ──────────────────
tk = node["triggerClassIndices"]
logger.debug(f"Running detection for node {node['modelId']} with trigger classes: {node.get('triggerClasses', [])} (indices: {tk})")
logger.debug(f"Node configuration: minConfidence={node['minConfidence']}, multiClass={node.get('multiClass', False)}")
res = node["model"].track(
frame,
stream=False,
@@ -277,48 +571,228 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False):
**({"classes": tk} if tk else {})
)[0]
dets, boxes = [], []
for box in res.boxes:
# Collect all detections above confidence threshold
all_detections = []
all_boxes = []
regions_dict = {}
logger.debug(f"Raw detection results from model: {len(res.boxes) if res.boxes is not None else 0} detections")
for i, box in enumerate(res.boxes):
conf = float(box.cpu().conf[0])
cid = int(box.cpu().cls[0])
name = node["model"].names[cid]
if conf < node["minConfidence"]:
continue
xy = box.cpu().xyxy[0]
x1,y1,x2,y2 = map(int, xy)
dets.append({"class": name, "confidence": conf,
"id": box.id.item() if hasattr(box, "id") else None})
boxes.append((x1, y1, x2, y2))
if not dets:
logger.debug(f"Detection {i}: class='{name}' (id={cid}), confidence={conf:.3f}, threshold={node['minConfidence']}")
if conf < node["minConfidence"]:
logger.debug(f" -> REJECTED: confidence {conf:.3f} < threshold {node['minConfidence']}")
continue
xy = box.cpu().xyxy[0]
x1, y1, x2, y2 = map(int, xy)
bbox = (x1, y1, x2, y2)
detection = {
"class": name,
"confidence": conf,
"id": box.id.item() if hasattr(box, "id") else None,
"bbox": bbox
}
all_detections.append(detection)
all_boxes.append(bbox)
logger.debug(f" -> ACCEPTED: {name} with confidence {conf:.3f}, bbox={bbox}")
# Store highest confidence detection for each class
if name not in regions_dict or conf > regions_dict[name]["confidence"]:
regions_dict[name] = {
"bbox": bbox,
"confidence": conf,
"detection": detection
}
logger.debug(f" -> Updated regions_dict['{name}'] with confidence {conf:.3f}")
logger.info(f"Detection summary: {len(all_detections)} accepted detections from {len(res.boxes) if res.boxes is not None else 0} total")
logger.info(f"Detected classes: {list(regions_dict.keys())}")
if not all_detections:
logger.warning("No detections above confidence threshold - returning null")
return (None, None) if return_bbox else None
# take highest-confidence
best_idx = max(range(len(dets)), key=lambda i: dets[i]["confidence"])
best_det = dets[best_idx]
best_box = boxes[best_idx]
# ─── Multi-class validation ─────────────────────────────────
if node.get("multiClass", False) and node.get("expectedClasses"):
expected_classes = node["expectedClasses"]
detected_classes = list(regions_dict.keys())
# ─── Branch (classification) ───────────────────────────────
logger.info(f"Multi-class validation: expected={expected_classes}, detected={detected_classes}")
# Check if at least one expected class is detected (flexible mode)
matching_classes = [cls for cls in expected_classes if cls in detected_classes]
missing_classes = [cls for cls in expected_classes if cls not in detected_classes]
logger.debug(f"Matching classes: {matching_classes}, Missing classes: {missing_classes}")
if not matching_classes:
# No expected classes found at all
logger.warning(f"PIPELINE REJECTED: No expected classes detected. Expected: {expected_classes}, Detected: {detected_classes}")
return (None, None) if return_bbox else None
if missing_classes:
logger.info(f"Partial multi-class detection: {matching_classes} found, {missing_classes} missing")
else:
logger.info(f"Complete multi-class detection success: {detected_classes}")
else:
logger.debug("No multi-class validation - proceeding with all detections")
# ─── Execute actions with region information ────────────────
detection_result = {
"detections": all_detections,
"regions": regions_dict,
**(context or {})
}
# ─── Create initial database record when Car+Frontal detected ────
if node.get("db_manager") and node.get("multiClass", False):
# Only create database record if we have both Car and Frontal
has_car = "Car" in regions_dict
has_frontal = "Frontal" in regions_dict
if has_car and has_frontal:
# Generate UUID session_id since client session is None for now
import uuid as uuid_lib
from datetime import datetime
generated_session_id = str(uuid_lib.uuid4())
# Insert initial detection record
display_id = detection_result.get("display_id", "unknown")
timestamp = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
inserted_session_id = node["db_manager"].insert_initial_detection(
display_id=display_id,
captured_timestamp=timestamp,
session_id=generated_session_id
)
if inserted_session_id:
# Update detection_result with the generated session_id for actions and branches
detection_result["session_id"] = inserted_session_id
detection_result["timestamp"] = timestamp # Update with proper timestamp
logger.info(f"Created initial database record with session_id: {inserted_session_id}")
else:
logger.debug(f"Database record not created - missing required classes. Has Car: {has_car}, Has Frontal: {has_frontal}")
execute_actions(node, frame, detection_result, regions_dict)
# ─── Parallel branch processing ─────────────────────────────
if node["branches"]:
branch_results = {}
# Filter branches that should be triggered
active_branches = []
for br in node["branches"]:
if (best_det["class"] in br["triggerClasses"]
and best_det["confidence"] >= br["minConfidence"]):
# crop if requested
sub = frame
if br["crop"]:
x1,y1,x2,y2 = best_box
sub = frame[y1:y2, x1:x2]
sub = cv2.resize(sub, (224, 224))
trigger_classes = br.get("triggerClasses", [])
min_conf = br.get("minConfidence", 0)
det2, _ = run_pipeline(sub, br, return_bbox=True)
if det2:
# return classification result + original bbox
execute_actions(br, sub, det2)
return (det2, best_box) if return_bbox else det2
logger.debug(f"Evaluating branch {br['modelId']}: trigger_classes={trigger_classes}, min_conf={min_conf}")
# ─── No branch matched → return this detection ─────────────
execute_actions(node, frame, best_det)
return (best_det, best_box) if return_bbox else best_det
# Check if any detected class matches branch trigger
branch_triggered = False
for det_class in regions_dict:
det_confidence = regions_dict[det_class]["confidence"]
logger.debug(f" Checking detected class '{det_class}' (confidence={det_confidence:.3f}) against triggers {trigger_classes}")
if (det_class in trigger_classes and det_confidence >= min_conf):
active_branches.append(br)
branch_triggered = True
logger.info(f"Branch {br['modelId']} activated by class '{det_class}' (conf={det_confidence:.3f} >= {min_conf})")
break
if not branch_triggered:
logger.debug(f"Branch {br['modelId']} not triggered - no matching classes or insufficient confidence")
if active_branches:
if node.get("parallel", False) or any(br.get("parallel", False) for br in active_branches):
# Run branches in parallel
with concurrent.futures.ThreadPoolExecutor(max_workers=len(active_branches)) as executor:
futures = {}
for br in active_branches:
crop_class = br.get("cropClass", br.get("triggerClasses", [])[0] if br.get("triggerClasses") else None)
sub_frame = frame
logger.info(f"Starting parallel branch: {br['modelId']}, crop_class: {crop_class}")
if br.get("crop", False) and crop_class:
cropped = crop_region_by_class(frame, regions_dict, crop_class)
if cropped is not None:
sub_frame = cv2.resize(cropped, (224, 224))
logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']}")
else:
logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch")
continue
future = executor.submit(run_pipeline, sub_frame, br, True, context)
futures[future] = br
# Collect results
for future in concurrent.futures.as_completed(futures):
br = futures[future]
try:
result, _ = future.result()
if result:
branch_results[br["modelId"]] = result
logger.info(f"Branch {br['modelId']} completed: {result}")
except Exception as e:
logger.error(f"Branch {br['modelId']} failed: {e}")
else:
# Run branches sequentially
for br in active_branches:
crop_class = br.get("cropClass", br.get("triggerClasses", [])[0] if br.get("triggerClasses") else None)
sub_frame = frame
logger.info(f"Starting sequential branch: {br['modelId']}, crop_class: {crop_class}")
if br.get("crop", False) and crop_class:
cropped = crop_region_by_class(frame, regions_dict, crop_class)
if cropped is not None:
sub_frame = cv2.resize(cropped, (224, 224))
logger.debug(f"Successfully cropped {crop_class} region for {br['modelId']}")
else:
logger.warning(f"Failed to crop {crop_class} region for {br['modelId']}, skipping branch")
continue
try:
result, _ = run_pipeline(sub_frame, br, True, context)
if result:
branch_results[br["modelId"]] = result
logger.info(f"Branch {br['modelId']} completed: {result}")
else:
logger.warning(f"Branch {br['modelId']} returned no result")
except Exception as e:
logger.error(f"Error in sequential branch {br['modelId']}: {e}")
import traceback
logger.debug(f"Branch error traceback: {traceback.format_exc()}")
# Store branch results in detection_result for parallel actions
detection_result["branch_results"] = branch_results
# ─── Execute Parallel Actions ───────────────────────────────
if node.get("parallelActions") and "branch_results" in detection_result:
execute_parallel_actions(node, frame, detection_result, regions_dict)
# ─── Return detection result ────────────────────────────────
primary_detection = max(all_detections, key=lambda x: x["confidence"])
primary_bbox = primary_detection["bbox"]
# Add branch results to primary detection for compatibility
if "branch_results" in detection_result:
primary_detection["branch_results"] = detection_result["branch_results"]
return (primary_detection, primary_bbox) if return_bbox else primary_detection
except Exception as e:
logging.error(f"Error in node {node.get('modelId')}: {e}")
logger.error(f"Error in node {node.get('modelId')}: {e}")
traceback.print_exc()
return (None, None) if return_bbox else None
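End to end, a usage sketch (paths and IDs are placeholders): load a .mpta pipeline, then run it per frame with the session context described above.

import cv2

model_tree = load_pipeline_from_zip("models/detector.mpta", "models/cache")
frame = cv2.imread("sample.jpg")  # placeholder frame

context = {"camera_id": "cam-01", "display_id": "display-001", "session_id": None}
detection, bbox = run_pipeline(frame, model_tree, return_bbox=True, context=context)
if detection:
    print(detection["class"], detection["confidence"], detection.get("branch_results", {}))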