diff --git a/.gitignore b/.gitignore index c990ddb..b36f421 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,11 @@ -/models +# Do not know how to use +archive/ +Dockerfile + +# /models app.log *.pt +.venv/ # All pycache directories __pycache__/ @@ -11,5 +16,6 @@ detector_worker.log .gitignore no_frame_debug.log -feeder/ -.venv/ + +# Result from tracker +feeder/runs/ \ No newline at end of file diff --git a/app.py b/app.py index 7cd0407..09cb227 100644 --- a/app.py +++ b/app.py @@ -13,13 +13,7 @@ import requests import asyncio import psutil import zipfile -import ssl -import urllib3 -import subprocess -import tempfile from urllib.parse import urlparse -from requests.adapters import HTTPAdapter -from urllib3.util.ssl_ import create_urllib3_context from fastapi import FastAPI, WebSocket, HTTPException from fastapi.websockets import WebSocketDisconnect from fastapi.responses import Response @@ -246,14 +240,16 @@ async def detect(websocket: WebSocket): logger.debug(f"Processing frame for camera {camera_id} with model {stream['modelId']}") start_time = time.time() - # Extract display identifier for pipeline context + # Extract display identifier for session ID lookup subscription_parts = stream["subscriptionIdentifier"].split(';') display_identifier = subscription_parts[0] if subscription_parts else None + session_id = session_ids.get(display_identifier) if display_identifier else None - # Create context for pipeline execution (session_id will be generated by pipeline) + # Create context for pipeline execution pipeline_context = { "camera_id": camera_id, - "display_id": display_identifier + "display_id": display_identifier, + "session_id": session_id } detection_result = run_pipeline(cropped_frame, model_tree, context=pipeline_context) @@ -263,63 +259,57 @@ async def detect(websocket: WebSocket): # Log the raw detection result for debugging logger.debug(f"Raw detection result for camera {camera_id}:\n{json.dumps(detection_result, indent=2, default=str)}") - # Extract session_id from pipeline result (generated during database record creation) - session_id = None - if detection_result and isinstance(detection_result, dict): - # Check if pipeline generated a session_id (happens when Car+Frontal detected together) - if "session_id" in detection_result: - session_id = detection_result["session_id"] - logger.debug(f"Extracted session_id from pipeline result: {session_id}") - - # Process detection result - run_pipeline returns the primary detection directly - if detection_result and isinstance(detection_result, dict) and "class" in detection_result: - highest_confidence_detection = detection_result - else: - # No detection found + # Direct class result (no detections/classifications structure) + if detection_result and isinstance(detection_result, dict) and "class" in detection_result and "confidence" in detection_result: highest_confidence_detection = { + "class": detection_result.get("class", "none"), + "confidence": detection_result.get("confidence", 1.0), + "box": [0, 0, 0, 0] # Empty bounding box for classifications + } + # Handle case when no detections found or result is empty + elif not detection_result or not detection_result.get("detections"): + # Check if we have classification results + if detection_result and detection_result.get("classifications"): + # Get the highest confidence classification + classifications = detection_result.get("classifications", []) + highest_confidence_class = max(classifications, key=lambda x: x.get("confidence", 0)) if classifications else None + + if 
highest_confidence_class: + highest_confidence_detection = { + "class": highest_confidence_class.get("class", "none"), + "confidence": highest_confidence_class.get("confidence", 1.0), + "box": [0, 0, 0, 0] # Empty bounding box for classifications + } + else: + highest_confidence_detection = { + "class": "none", + "confidence": 1.0, + "box": [0, 0, 0, 0] + } + else: + highest_confidence_detection = { + "class": "none", + "confidence": 1.0, + "box": [0, 0, 0, 0] + } + else: + # Find detection with highest confidence + detections = detection_result.get("detections", []) + highest_confidence_detection = max(detections, key=lambda x: x.get("confidence", 0)) if detections else { "class": "none", "confidence": 1.0, - "bbox": [0, 0, 0, 0], - "branch_results": {} + "box": [0, 0, 0, 0] } - # Convert detection format to match backend expectations exactly as in worker.md section 4.2 - detection_dict = { - "carModel": None, - "carBrand": None, - "carYear": None, - "bodyType": None, - "licensePlateText": None, - "licensePlateConfidence": None - } + # Convert detection format to match protocol - flatten detection attributes + detection_dict = {} - # Extract and process branch results from parallel classification - branch_results = highest_confidence_detection.get("branch_results", {}) - if branch_results: - logger.debug(f"Processing branch results: {branch_results}") - - # Transform branch results into backend-expected detection attributes - for branch_id, branch_data in branch_results.items(): - if isinstance(branch_data, dict): - logger.debug(f"Processing branch {branch_id}: {branch_data}") - - # Map common classification fields to backend-expected names - if "brand" in branch_data: - detection_dict["carBrand"] = branch_data["brand"] - if "body_type" in branch_data: - detection_dict["bodyType"] = branch_data["body_type"] - if "class" in branch_data: - class_name = branch_data["class"] - - # Map based on branch/model type - if "brand" in branch_id.lower(): - detection_dict["carBrand"] = class_name - elif "bodytype" in branch_id.lower() or "body" in branch_id.lower(): - detection_dict["bodyType"] = class_name - - logger.info(f"Detection payload after branch processing: {detection_dict}") - else: - logger.debug("No branch results found in detection result") + # Handle different detection result formats + if isinstance(highest_confidence_detection, dict): + # Copy all fields from the detection result + for key, value in highest_confidence_detection.items(): + if key not in ["box", "id"]: # Skip internal fields + detection_dict[key] = value detection_data = { "type": "imageDetection", @@ -332,14 +322,12 @@ async def detect(websocket: WebSocket): } } - # Add session ID if available (generated by pipeline when Car+Frontal detected) + # Add session ID if available if session_id is not None: detection_data["sessionId"] = session_id - logger.debug(f"Added session_id to WebSocket response: {session_id}") - if highest_confidence_detection.get("class") != "none": - confidence = highest_confidence_detection.get("confidence", 0.0) - logger.info(f"Camera {camera_id}: Detected {highest_confidence_detection['class']} with confidence {confidence:.2f} using model {stream['modelName']}") + if highest_confidence_detection["class"] != "none": + logger.info(f"Camera {camera_id}: Detected {highest_confidence_detection['class']} with confidence {highest_confidence_detection['confidence']:.2f} using model {stream['modelName']}") # Log session ID if available if session_id: @@ -347,7 +335,6 @@ async def detect(websocket: 
WebSocket): await websocket.send_json(detection_data) logger.debug(f"Sent detection data to client for camera {camera_id}") - logger.debug(f"Sent this detection data: {detection_data}") return persistent_data except Exception as e: logger.error(f"Error in handle_detection for camera {camera_id}: {str(e)}", exc_info=True) @@ -513,199 +500,6 @@ async def detect(websocket: WebSocket): finally: logger.info(f"Snapshot reader thread for camera {camera_id} is exiting") - async def reconcile_subscriptions(desired_subscriptions, websocket): - """ - Declarative reconciliation: Compare desired vs current subscriptions and make changes - """ - logger.info(f"Reconciling subscriptions: {len(desired_subscriptions)} desired") - - with streams_lock: - # Get current subscriptions - current_subscription_ids = set(streams.keys()) - desired_subscription_ids = set(sub["subscriptionIdentifier"] for sub in desired_subscriptions) - - # Find what to add and remove - to_add = desired_subscription_ids - current_subscription_ids - to_remove = current_subscription_ids - desired_subscription_ids - to_check_for_changes = current_subscription_ids & desired_subscription_ids - - logger.info(f"Reconciliation: {len(to_add)} to add, {len(to_remove)} to remove, {len(to_check_for_changes)} to check for changes") - - # Remove subscriptions that are no longer wanted - for subscription_id in to_remove: - await unsubscribe_internal(subscription_id) - - # Check existing subscriptions for parameter changes - for subscription_id in to_check_for_changes: - desired_sub = next(sub for sub in desired_subscriptions if sub["subscriptionIdentifier"] == subscription_id) - current_stream = streams[subscription_id] - - # Check if parameters changed - if has_subscription_changed(desired_sub, current_stream): - logger.info(f"Parameters changed for {subscription_id}, resubscribing") - await unsubscribe_internal(subscription_id) - await subscribe_internal(desired_sub, websocket) - - # Add new subscriptions - for subscription_id in to_add: - desired_sub = next(sub for sub in desired_subscriptions if sub["subscriptionIdentifier"] == subscription_id) - await subscribe_internal(desired_sub, websocket) - - def has_subscription_changed(desired_sub, current_stream): - """Check if subscription parameters have changed""" - return ( - desired_sub.get("rtspUrl") != current_stream.get("rtsp_url") or - desired_sub.get("snapshotUrl") != current_stream.get("snapshot_url") or - desired_sub.get("snapshotInterval") != current_stream.get("snapshot_interval") or - desired_sub.get("cropX1") != current_stream.get("cropX1") or - desired_sub.get("cropY1") != current_stream.get("cropY1") or - desired_sub.get("cropX2") != current_stream.get("cropX2") or - desired_sub.get("cropY2") != current_stream.get("cropY2") or - desired_sub.get("modelId") != current_stream.get("modelId") or - desired_sub.get("modelName") != current_stream.get("modelName") - ) - - async def subscribe_internal(subscription, websocket): - """Internal subscription logic extracted from original subscribe handler""" - subscriptionIdentifier = subscription.get("subscriptionIdentifier") - rtsp_url = subscription.get("rtspUrl") - snapshot_url = subscription.get("snapshotUrl") - snapshot_interval = subscription.get("snapshotInterval") - model_url = subscription.get("modelUrl") - modelId = subscription.get("modelId") - modelName = subscription.get("modelName") - cropX1 = subscription.get("cropX1") - cropY1 = subscription.get("cropY1") - cropX2 = subscription.get("cropX2") - cropY2 = subscription.get("cropY2") - - 
# Extract camera_id from subscriptionIdentifier - parts = subscriptionIdentifier.split(';') - if len(parts) != 2: - logger.error(f"Invalid subscriptionIdentifier format: {subscriptionIdentifier}") - return - - display_identifier, camera_identifier = parts - camera_id = subscriptionIdentifier - - # Load model if needed - if model_url: - with models_lock: - if (camera_id not in models) or (modelId not in models[camera_id]): - logger.info(f"Loading model from {model_url} for camera {camera_id}, modelId {modelId}") - extraction_dir = os.path.join("models", camera_identifier, str(modelId)) - os.makedirs(extraction_dir, exist_ok=True) - - # Handle model loading (same as original) - parsed = urlparse(model_url) - if parsed.scheme in ("http", "https"): - filename = os.path.basename(parsed.path) or f"model_{modelId}.mpta" - local_mpta = os.path.join(extraction_dir, filename) - local_path = download_mpta(model_url, local_mpta) - if not local_path: - logger.error(f"Failed to download model from {model_url}") - return - model_tree = load_pipeline_from_zip(local_path, extraction_dir) - else: - if not os.path.exists(model_url): - logger.error(f"Model file not found: {model_url}") - return - model_tree = load_pipeline_from_zip(model_url, extraction_dir) - - if model_tree is None: - logger.error(f"Failed to load model {modelId}") - return - - if camera_id not in models: - models[camera_id] = {} - models[camera_id][modelId] = model_tree - - # Create stream (same logic as original) - if camera_id and (rtsp_url or snapshot_url) and len(streams) < max_streams: - camera_url = snapshot_url if snapshot_url else rtsp_url - - # Check if we already have a stream for this camera URL - shared_stream = camera_streams.get(camera_url) - - if shared_stream: - # Reuse existing stream - buffer = shared_stream["buffer"] - stop_event = shared_stream["stop_event"] - thread = shared_stream["thread"] - mode = shared_stream["mode"] - shared_stream["ref_count"] = shared_stream.get("ref_count", 0) + 1 - else: - # Create new stream - buffer = queue.Queue(maxsize=1) - stop_event = threading.Event() - - if snapshot_url and snapshot_interval: - thread = threading.Thread(target=snapshot_reader, args=(camera_id, snapshot_url, snapshot_interval, buffer, stop_event)) - thread.daemon = True - thread.start() - mode = "snapshot" - shared_stream = { - "buffer": buffer, "thread": thread, "stop_event": stop_event, - "mode": mode, "url": snapshot_url, "snapshot_interval": snapshot_interval, "ref_count": 1 - } - camera_streams[camera_url] = shared_stream - elif rtsp_url: - cap = cv2.VideoCapture(rtsp_url) - if not cap.isOpened(): - logger.error(f"Failed to open RTSP stream for camera {camera_id}") - return - thread = threading.Thread(target=frame_reader, args=(camera_id, cap, buffer, stop_event)) - thread.daemon = True - thread.start() - mode = "rtsp" - shared_stream = { - "buffer": buffer, "thread": thread, "stop_event": stop_event, - "mode": mode, "url": rtsp_url, "cap": cap, "ref_count": 1 - } - camera_streams[camera_url] = shared_stream - else: - logger.error(f"No valid URL provided for camera {camera_id}") - return - - # Create stream info - stream_info = { - "buffer": buffer, "thread": thread, "stop_event": stop_event, - "modelId": modelId, "modelName": modelName, "subscriptionIdentifier": subscriptionIdentifier, - "cropX1": cropX1, "cropY1": cropY1, "cropX2": cropX2, "cropY2": cropY2, - "mode": mode, "camera_url": camera_url, "modelUrl": model_url - } - - if mode == "snapshot": - stream_info["snapshot_url"] = snapshot_url - 
stream_info["snapshot_interval"] = snapshot_interval - elif mode == "rtsp": - stream_info["rtsp_url"] = rtsp_url - stream_info["cap"] = shared_stream["cap"] - - streams[camera_id] = stream_info - subscription_to_camera[camera_id] = camera_url - logger.info(f"Subscribed to camera {camera_id}") - - async def unsubscribe_internal(subscription_id): - """Internal unsubscription logic""" - if subscription_id in streams: - stream = streams.pop(subscription_id) - camera_url = subscription_to_camera.pop(subscription_id, None) - - if camera_url and camera_url in camera_streams: - shared_stream = camera_streams[camera_url] - shared_stream["ref_count"] -= 1 - - if shared_stream["ref_count"] <= 0: - shared_stream["stop_event"].set() - shared_stream["thread"].join() - if "cap" in shared_stream: - shared_stream["cap"].release() - del camera_streams[camera_url] - - latest_frames.pop(subscription_id, None) - logger.info(f"Unsubscribed from camera {subscription_id}") - async def process_streams(): logger.info("Started processing streams") try: @@ -773,10 +567,6 @@ async def detect(websocket: WebSocket): "modelId": stream["modelId"], "modelName": stream["modelName"], "online": True, - # Include all subscription parameters for proper change detection - "rtspUrl": stream.get("rtsp_url"), - "snapshotUrl": stream.get("snapshot_url"), - "snapshotInterval": stream.get("snapshot_interval"), **{k: v for k, v in get_crop_coords(stream).items() if v is not None} } for camera_id, stream in streams.items() @@ -805,44 +595,29 @@ async def detect(websocket: WebSocket): data = json.loads(msg) msg_type = data.get("type") - if msg_type == "setSubscriptionList": - # Declarative approach: Backend sends list of subscriptions this worker should have - desired_subscriptions = data.get("subscriptions", []) - logger.info(f"Received subscription list with {len(desired_subscriptions)} subscriptions") - - await reconcile_subscriptions(desired_subscriptions, websocket) - - elif msg_type == "subscribe": - # Legacy support - convert single subscription to list - payload = data.get("payload", {}) - await reconcile_subscriptions([payload], websocket) - - elif msg_type == "unsubscribe": - # Legacy support - remove subscription + if msg_type == "subscribe": payload = data.get("payload", {}) subscriptionIdentifier = payload.get("subscriptionIdentifier") - # Remove from current subscriptions and reconcile - current_subs = [] - with streams_lock: - for camera_id, stream in streams.items(): - if stream["subscriptionIdentifier"] != subscriptionIdentifier: - # Convert stream back to subscription format - current_subs.append({ - "subscriptionIdentifier": stream["subscriptionIdentifier"], - "rtspUrl": stream.get("rtsp_url"), - "snapshotUrl": stream.get("snapshot_url"), - "snapshotInterval": stream.get("snapshot_interval"), - "modelId": stream["modelId"], - "modelName": stream["modelName"], - "modelUrl": stream.get("modelUrl", ""), - "cropX1": stream.get("cropX1"), - "cropY1": stream.get("cropY1"), - "cropX2": stream.get("cropX2"), - "cropY2": stream.get("cropY2") - }) - await reconcile_subscriptions(current_subs, websocket) + rtsp_url = payload.get("rtspUrl") + snapshot_url = payload.get("snapshotUrl") + snapshot_interval = payload.get("snapshotInterval") + model_url = payload.get("modelUrl") + modelId = payload.get("modelId") + modelName = payload.get("modelName") + cropX1 = payload.get("cropX1") + cropY1 = payload.get("cropY1") + cropX2 = payload.get("cropX2") + cropY2 = payload.get("cropY2") + + # Extract camera_id from subscriptionIdentifier 
(format: displayIdentifier;cameraIdentifier) + parts = subscriptionIdentifier.split(';') + if len(parts) != 2: + logger.error(f"Invalid subscriptionIdentifier format: {subscriptionIdentifier}") + continue - elif msg_type == "old_subscribe_logic_removed": + display_identifier, camera_identifier = parts + camera_id = subscriptionIdentifier # Use full subscriptionIdentifier as camera_id for mapping + if model_url: with models_lock: if (camera_id not in models) or (modelId not in models[camera_id]): @@ -1038,10 +813,6 @@ async def detect(websocket: WebSocket): "modelId": stream["modelId"], "modelName": stream["modelName"], "online": True, - # Include all subscription parameters for proper change detection - "rtspUrl": stream.get("rtsp_url"), - "snapshotUrl": stream.get("snapshot_url"), - "snapshotInterval": stream.get("snapshot_interval"), **{k: v for k, v in get_crop_coords(stream).items() if v is not None} } for camera_id, stream in streams.items() diff --git a/debug/cuda.py b/debug/cuda.py new file mode 100644 index 0000000..44265e1 --- /dev/null +++ b/debug/cuda.py @@ -0,0 +1,4 @@ +import torch +print(torch.cuda.is_available()) # True if CUDA is available +print(torch.cuda.get_device_name(0)) # GPU name +print(torch.version.cuda) # CUDA version PyTorch was compiled with \ No newline at end of file diff --git a/docs/MasterElection.md b/docs/MasterElection.md deleted file mode 100644 index c5980b8..0000000 --- a/docs/MasterElection.md +++ /dev/null @@ -1,1449 +0,0 @@ -# Master Election Service Specification - Distributed Process Coordination - -## Overview - -The MasterElection service implements a Redis-based distributed leadership election and process coordination system for the CMS backend cluster. This service provides robust master-slave coordination with automatic failover, process registration, and TTL-based cleanup for multi-process backend deployments. - -**Key Architectural Principle**: Redis-based coordination with atomic Lua scripts ensures consistency and prevents split-brain scenarios while providing automatic cleanup through per-entry TTL expiration. - -## Architecture Components - -### Two-Tier Process Coordination - -The system manages two distinct coordination layers: - -1. **Master Election Layer**: Single leader election across all backend processes -2. **Process Registry Layer**: Individual process registration and heartbeat management - -### Leadership Election Pattern - -- **Single Master**: Only one backend process holds master lock at any time -- **Automatic Failover**: Master election triggers immediately when current master fails -- **Heartbeat-Based**: Master must renew lock every 10 seconds or lose leadership -- **Lua Script Atomicity**: All Redis operations use atomic Lua scripts to prevent race conditions -- **Event-Driven Transitions**: Role changes emit events for dependent services integration - -## Core Components - -### MasterElection Class -`cms-backend/services/MasterElection.ts` - -Primary coordination service that handles distributed leadership election and process lifecycle management. 
- -**Key Responsibilities:** -- Manages master lock acquisition and renewal using atomic Redis operations -- Provides process registration with automatic TTL-based expiration (45 seconds) -- Emits role transition events for dependent service coordination -- Handles slave registration and heartbeat management -- Maintains process-to-channel mapping for message routing - -### Process Management System - -**Process Registration:** -- Each backend process registers with unique UUID-based identifier -- Process metadata includes role, channel name, and capabilities -- TTL-based expiration (45 seconds) with heartbeat renewal -- Automatic cleanup of stale process entries without manual intervention - -**Channel Assignment:** -- Each process gets assigned a unique Redis pub/sub channel -- Channel mapping stored persistently for message routing -- Master process maintains channel-to-process mapping - -## Data Structures - -### MasterElectionEvents -```typescript -interface MasterElectionEvents { - 'master-acquired': () => void; // This process became master - 'master-lost': () => void; // This process lost master status - 'election-started': () => void; // Election process initiated - 'election-completed': (isMaster: boolean) => void; // Election finished - 'slave-registered': (slave: SlaveNode) => void; // New slave joined - 'slave-removed': (nodeId: string) => void; // Slave left/expired - 'error': (error: Error) => void; // Election/coordination errors -} -``` - -### ProcessInfo -```typescript -interface ProcessInfo { - processId: string; // Unique process identifier (UUID) - nodeId: string; // Node identifier (same as processId) - role: 'master' | 'slave'; // Current process role - lastSeen: string; // Last heartbeat timestamp (ISO string) - capabilities: ProcessCapabilities; // Process feature capabilities -} - -// Channel name derived as: `worker:slave:${processInfo.processId}` -``` - -### ProcessCapabilities -```typescript -interface ProcessCapabilities { - canProcessDetections: boolean; // Can handle AI detection processing - maxSubscriptions: number; // Maximum camera subscriptions supported - preferredWorkload: number; // Preferred subscription load (0-100) -} -``` - -### SlaveNode -```typescript -interface SlaveNode { - nodeId: string; // Unique slave node identifier - identifier: string; // Human-readable process identifier - registeredAt: string; // Initial registration timestamp - lastSeen: string; // Last heartbeat timestamp - metadata?: Record; // Optional process metadata -} -``` - -## Redis Data Architecture - -### Master Election Keys -- `master-election:master` - Current master process identifier with TTL lock -- `master-election:heartbeat` - Master heartbeat timestamp for liveness detection -- `master-election:master_process` - Detailed master process information (JSON) - -### Process Registry Keys (TTL-Enabled) -- `master-election:processes` - Hash map of all active processes with per-entry TTL (45s) -- Channel names derived directly from process ID: `worker:slave:{processId}` - no separate mapping needed - -### TTL Configuration -```typescript -// Per-entry TTL using hSetEx for automatic cleanup -PROCESS_TTL = 45; // Process registration expires after 45 seconds -HEARTBEAT_RENEWAL_INTERVAL = 10; // Process heartbeats renew TTL every 10 seconds -MASTER_LOCK_TTL = 30; // Master lock expires after 30 seconds -``` - -### Data Persistence Strategy -Uses **per-entry TTL with hSetEx** for automatic cleanup: -- Process entries automatically expire if heartbeats stop -- No manual 
cleanup processes required -- Prevents memory leaks from crashed processes -- Self-healing system that maintains only active processes -- Slave information derived from processes with role='slave' - no separate storage needed -- Channel names derived directly from process ID - no mapping table required - -## Master Election Algorithm - -### Election Flow Diagram - -```mermaid -graph TB - subgraph "Election Process" - START[Process Starts] --> ATTEMPT[attemptElection] - ATTEMPT --> ACQUIRE{acquireMasterLock} - - ACQUIRE -->|Success| MASTER[becomeMaster] - ACQUIRE -->|Failed| SLAVE[becomeSlave] - - MASTER --> HEARTBEAT[startHeartbeat] - SLAVE --> REGISTER[registerAsSlave] - - HEARTBEAT --> RENEW{renewMasterLock} - RENEW -->|Success| CONTINUE[Continue as Master] - RENEW -->|Failed| STEPDOWN[Step Down → SLAVE] - - REGISTER --> MONITOR[Monitor Master] - MONITOR --> CHECK{Master Exists?} - CHECK -->|Yes| WAIT[Wait and Monitor] - CHECK -->|No| ATTEMPT - - STEPDOWN --> SLAVE - WAIT --> MONITOR - CONTINUE --> RENEW - end - - subgraph "Atomic Operations" - ACQUIRE --> LUA1[Lua Script: SET master NX + SET heartbeat] - RENEW --> LUA2[Lua Script: Check owner + PEXPIRE + SET heartbeat] - STEPDOWN --> LUA3[Lua Script: Check owner + DEL master + DEL heartbeat] - end -``` - -### Atomic Lock Operations - -#### Master Lock Acquisition -```lua --- Atomic master lock acquisition with heartbeat -if redis.call("SET", KEYS[1], ARGV[1], "NX", "PX", ARGV[2]) then - redis.call("SET", KEYS[2], ARGV[3], "PX", ARGV[2]) - return 1 -else - return 0 -end -``` - -#### Master Lock Renewal -```lua --- Atomic master lock renewal with heartbeat update -if redis.call("GET", KEYS[1]) == ARGV[1] then - redis.call("PEXPIRE", KEYS[1], ARGV[2]) - redis.call("SET", KEYS[2], ARGV[3], "PX", ARGV[2]) - return 1 -else - return 0 -end -``` - -#### Master Lock Release -```lua --- Atomic master lock release -if redis.call("GET", KEYS[1]) == ARGV[1] then - redis.call("DEL", KEYS[1], KEYS[2]) - return 1 -else - return 0 -end -``` - -## Process Lifecycle Management - -### Process Registration Flow - -```mermaid -sequenceDiagram - participant P as Process - participant R as Redis - participant M as Master Process - - Note over P,M: Process Registration with TTL - - P->>+P: Generate UUID processId - P->>+P: Determine role (master/slave) - P->>+P: Assign channel name - - P->>+R: hSetEx(processes, processId, processInfo, {EX: 45}) - R-->>-P: Registration confirmed - - P->>+R: hSet(channels, processId, channelName) - R-->>-P: Channel mapping stored - - alt Process becomes master - P->>+R: set(master_process, processInfo) - R-->>-P: Master process registered - P->>+M: emit('master-acquired') - else Process becomes slave - P->>+R: hSet(slaves, nodeId, slaveInfo) - R-->>-P: Slave registered - P->>+M: emit('slave-registered', slaveInfo) - end - - Note over P,M: Heartbeat Loop (Every 10s) - - loop Every 10 seconds - P->>+P: updateProcessHeartbeat(processId) - P->>+R: hSetEx(processes, processId, updatedInfo, {EX: 45}) - Note over R: TTL renewed for 45 seconds - R-->>-P: Heartbeat recorded - end - - Note over P,M: Automatic Expiration (No heartbeat) - - R->>R: 45 seconds pass without heartbeat - R->>R: Process entry automatically expires - Note over R: No manual cleanup needed -``` - -### Master Election Scenarios - -#### Scenario 1: Initial Startup -```mermaid -sequenceDiagram - participant P1 as Process 1 - participant P2 as Process 2 - participant R as Redis - - Note over P1,R: First Process Startup - - P1->>+P1: attemptElection() - P1->>+R: Lua 
Script: SET master NX - R-->>-P1: Success (no existing master) - - P1->>+P1: becomeMaster() - P1->>+P1: emit('master-acquired') - P1->>+P1: startHeartbeat() every 10s - - Note over P1,R: Second Process Startup - - P2->>+P2: attemptElection() - P2->>+R: Lua Script: SET master NX - R-->>-P2: Failed (master exists) - - P2->>+P2: becomeSlave() - P2->>+R: hSet(slaves, nodeId, slaveInfo) - P2->>+P2: emit('election-completed', false) -``` - -#### Scenario 2: Master Failure and Failover -```mermaid -sequenceDiagram - participant P1 as Master Process - participant P2 as Slave Process 1 - participant P3 as Slave Process 2 - participant R as Redis - - Note over P1,R: Normal Operation - - P1->>+R: Heartbeat renewal every 10s - P2->>+P2: Monitor master existence every 5s - P3->>+P3: Monitor master existence every 5s - - Note over P1,R: Master Failure - - P1--XP1: Process crashes/network failure - - Note over R: Master lock expires after 30s - - R->>R: Master lock TTL expires - - Note over P2,R: Slave Detects Missing Master - - P2->>+R: checkMasterExists() Lua Script - R-->>-P2: Master not found or stale - - P2->>+P2: Random delay (0-2s) to reduce collisions - P2->>+R: attemptElection() - Lua Script: SET master NX - R-->>-P2: Success - became new master - - P2->>+P2: becomeMaster() - P2->>+P2: emit('master-acquired') - - Note over P3,R: Other Slave Detects New Master - - P3->>+R: checkMasterExists() - R-->>-P3: New master found - P3->>+P3: Continue as slave - no election needed -``` - -## TTL-Based Cleanup System - -### Per-Entry TTL Implementation - -```typescript -// Process registration with automatic TTL expiration -public async registerProcess(processInfo: ProcessInfo): Promise { - // Set process registration with 45 second TTL per entry - await redisClient.hSetEx( - this.processesKey, - { - [processInfo.processId]: JSON.stringify(processInfo) - }, - { - expiration: { - type: 'EX', - value: 45 // 45 second TTL per process entry - } - } - ); - - // Map process to channel (no TTL - cleaned up manually) - await redisClient.hSet( - this.processChannelsKey, - processInfo.processId, - processInfo.channelName - ); -} - -// Heartbeat renewal extends TTL automatically -public async updateProcessHeartbeat(processId: string): Promise { - const processData = await redisClient.hGet(this.processesKey, processId); - if (processData) { - const processInfo: ProcessInfo = JSON.parse(processData); - processInfo.lastSeen = new Date().toISOString(); - - // Update process and renew TTL on heartbeat (per-entry TTL) - await redisClient.hSetEx( - this.processesKey, - { - [processId]: JSON.stringify(processInfo) - }, - { - expiration: { - type: 'EX', - value: 45 // Renew 45 second TTL for this specific process entry - } - } - ); - } -} -``` - -### Cleanup Behavior - -```mermaid -graph TB - subgraph "TTL Cleanup Process" - REG[Process Registration] --> TTL[45s TTL Set] - TTL --> HB{Heartbeat Within 45s?} - - HB -->|Yes| RENEW[TTL Renewed to 45s] - HB -->|No| EXPIRE[Entry Automatically Expires] - - RENEW --> HB - EXPIRE --> GONE[Process Removed from Redis] - - GONE --> DETECT[Other Processes Detect Absence] - DETECT --> REBALANCE[Automatic Rebalancing] - end - - subgraph "Manual vs TTL Cleanup" - MANUAL[Manual Cleanup Process] - AUTOMATIC[TTL-Based Cleanup] - - MANUAL -.->|"❌ Complex"| ISSUES[Race Conditions
Memory Leaks<br/>Stale Data]
-        AUTOMATIC -.->|"✅ Simple"| BENEFITS[Self-Healing<br/>No Race Conditions<br/>
Guaranteed Cleanup] - end -``` - -## Event System Architecture - -### Event Emission Flow - -```mermaid -graph TD - subgraph "Election Events" - START[Election Started] --> ATTEMPT[Attempt Lock Acquisition] - ATTEMPT --> SUCCESS{Lock Acquired?} - - SUCCESS -->|Yes| MASTER[Become Master] - SUCCESS -->|No| SLAVE[Become Slave] - - MASTER --> MASTER_EVENT[emit('master-acquired')] - SLAVE --> SLAVE_EVENT[emit('election-completed', false)] - - MASTER_EVENT --> HEARTBEAT[Start Heartbeat Loop] - SLAVE_EVENT --> MONITOR[Start Master Monitoring] - end - - subgraph "Heartbeat Events" - HEARTBEAT --> RENEW{Renew Lock?} - RENEW -->|Success| CONTINUE[Continue as Master] - RENEW -->|Failed| LOST[emit('master-lost')] - - LOST --> STEPDOWN[Step Down to Slave] - STEPDOWN --> TRIGGER[Trigger New Election] - CONTINUE --> HEARTBEAT - end - - subgraph "Slave Management Events" - SLAVE_JOIN[New Slave Joins] --> SLAVE_REG[emit('slave-registered')] - SLAVE_TIMEOUT[Slave Heartbeat Timeout] --> SLAVE_REM[emit('slave-removed')] - - SLAVE_REG --> NOTIFY[Notify Dependent Services] - SLAVE_REM --> CLEANUP[Cleanup Assignments] - end -``` - -### Event Handler Integration - -```typescript -// Example: Camera module integration with MasterElection events -const masterElection = getMasterElection(); - -masterElection.on('master-acquired', () => { - // This process became master - start managing workers - masterSlaveWorkerCluster.becomeMaster(); - logger.info('Camera cluster: Became master, connecting to workers'); -}); - -masterElection.on('master-lost', () => { - // This process lost master status - become slave - masterSlaveWorkerCluster.becomeSlave(); - logger.info('Camera cluster: Became slave, disconnecting workers'); -}); - -masterElection.on('slave-registered', (slave: SlaveNode) => { - // New backend process joined - rebalance workload - masterSlaveWorkerCluster.handleSlaveJoined(slave); - logger.info(`Camera cluster: Slave ${slave.nodeId} joined`); -}); - -masterElection.on('slave-removed', (nodeId: string) => { - // Backend process left - reassign its workload - masterSlaveWorkerCluster.handleSlaveLeft(nodeId); - logger.info(`Camera cluster: Slave ${nodeId} removed`); -}); -``` - -## Process Coordination Patterns - -### Master Role Responsibilities - -```mermaid -graph TB - subgraph "Master Process Duties" - LOCK[Maintain Master Lock] --> HEARTBEAT[Send Heartbeats Every 10s] - HEARTBEAT --> MONITOR[Monitor All Slave Processes] - - MONITOR --> CLEANUP[Cleanup Stale Slave Entries] - CLEANUP --> BALANCE[Coordinate Resource Balancing] - - BALANCE --> WORKERS[Manage Worker Connections] - WORKERS --> ROUTE[Route Messages to Slaves] - - ROUTE --> STATUS[Provide Cluster Status] - STATUS --> LOCK - end - - subgraph "Master Failure Scenarios" - NETWORK[Network Partition] --> TIMEOUT[Lock Renewal Timeout] - CRASH[Process Crash] --> TIMEOUT - OVERLOAD[Resource Overload] --> TIMEOUT - - TIMEOUT --> EXPIRE[Master Lock Expires] - EXPIRE --> ELECTION[New Election Triggered] - ELECTION --> RECOVER[New Master Elected] - end -``` - -### Slave Role Responsibilities - -```mermaid -graph TB - subgraph "Slave Process Duties" - REGISTER[Register with Master Election] --> HEARTBEAT[Send Heartbeats Every 5s] - HEARTBEAT --> MONITOR[Monitor Master Existence] - - MONITOR --> PROCESS[Process Assigned Messages] - PROCESS --> REPORT[Report Status to Master] - - REPORT --> DETECT{Master Missing?} - DETECT -->|No| MONITOR - DETECT -->|Yes| ELECTION[Trigger Election] - - ELECTION --> ATTEMPT{Win Election?} - ATTEMPT -->|Yes| PROMOTE[Become Master] 
-    ATTEMPT -->|No| CONTINUE[Continue as Slave]
-
-    PROMOTE --> MASTER[Master Role Duties]
-    CONTINUE --> REGISTER
-    end
-```
-
-## Class Responsibilities Overview
-
-### Core Class Functions
-
-| Class | Primary Responsibility | Key Methods | Process Type |
-|-------|----------------------|-------------|--------------|
-| **MasterElection** | Distributed coordination and leadership election | • `start()` - Initialize election process<br/>• `attemptElection()` - Try to acquire master lock<br/>• `becomeMaster()` - Transition to master role<br/>• `becomeSlave()` - Transition to slave role<br/>• `waitForElectionComplete()` - Synchronous election waiting | Both Master & Slave |
-| **Process Registry** | Process lifecycle management | • `registerProcess()` - Register with TTL<br/>• `updateProcessHeartbeat()` - Renew TTL<br/>• `getAllProcesses()` - Get active processes<br/>• `getProcessesByRole()` - Filter by master/slave<br/>• `unregisterProcess()` - Manual cleanup | Both Master & Slave |
-| **Master Lock Manager** | Atomic lock operations | • `acquireMasterLock()` - Lua script lock acquisition<br/>• `renewMasterLock()` - Lua script lock renewal<br/>• `releaseMasterLock()` - Lua script lock release<br/>• `checkMasterExists()` - Lua script master validation | Both Master & Slave |
-| **Slave Management** | Slave registration and monitoring | • `registerAsSlave()` - Register as slave node<br/>• `updateSlaveHeartbeat()` - Update slave status<br/>• `cleanupStaleSlaves()` - Remove expired slaves<br/>
• `getSlaves()` - Get all registered slaves | Both Master & Slave | - -## Object Relationship Diagrams - -### Core Class Structure and Dependencies - -```mermaid -classDiagram - class MasterElection { - -nodeId: string - -identifier: string - -isMaster: boolean - -lockTtl: number - -heartbeatInterval: number - +start() - +stop() - +getIsMaster(): boolean - +getNodeId(): string - +waitForElectionComplete(): Promise~boolean~ - -attemptElection() - -acquireMasterLock(): Promise~boolean~ - -renewMasterLock(): Promise~boolean~ - -releaseMasterLock() - -becomeMaster() - -becomeSlave() - -checkMasterExists(): Promise~boolean~ - } - - class ProcessRegistry { - +registerProcess(processInfo) - +updateProcessHeartbeat(processId) - +getAllProcesses(): Promise~ProcessInfo[]~ - +getMasterProcess(): Promise~ProcessInfo~ - +getProcessesByRole(role): Promise~ProcessInfo[]~ - +unregisterProcess(processId) - +getProcessChannel(processId): Promise~string~ - } - - class SlaveManagement { - +registerAsSlave() - +unregisterFromSlaves() - +updateSlaveHeartbeat() - +getSlaves(): Promise~SlaveNode[]~ - +getSlave(nodeId): Promise~SlaveNode~ - +getSlaveCount(): Promise~number~ - -cleanupStaleSlaves() - -startSlaveManagement() - -stopSlaveManagement() - } - - class EventEmitter { - +on(event, listener) - +emit(event, ...args) - +once(event, listener) - +off(event, listener) - } - - MasterElection --|> EventEmitter : extends - MasterElection --* ProcessRegistry : contains - MasterElection --* SlaveManagement : contains - - MasterElection --> Redis : uses for coordination - ProcessRegistry --> Redis : uses hSetEx for TTL - SlaveManagement --> Redis : uses for slave state -``` - -### Redis Operations and Key Management - -```mermaid -graph TB - subgraph "Redis Key Structure" - MASTER[master-election:master
String - Current master ID with TTL]
-        HEARTBEAT[master-election:heartbeat<br/>String - Master heartbeat timestamp]
-        MASTER_PROC[master-election:master_process<br/>String - Master ProcessInfo JSON]
-
-        PROCESSES[master-election:processes<br/>Hash - ProcessInfo with per-entry TTL]
-        CHANNELS[master-election:channels<br/>Hash - ProcessID → Channel mapping]
-        SLAVES[master-election:slaves<br/>Hash - SlaveNode data]
-    end
-
-    subgraph "Atomic Operations"
-        LUA1[Master Acquisition<br/>SET master NX + SET heartbeat]
-        LUA2[Master Renewal<br/>Check owner + PEXPIRE + SET heartbeat]
-        LUA3[Master Release<br/>Check owner + DEL master + heartbeat]
-        LUA4[Master Check<br/>GET master + GET heartbeat + validate TTL]
-    end
-
-    subgraph "TTL Operations"
-        HSETEX1[Process Registration<br/>hSetEx with 45s TTL per entry]
-        HSETEX2[Heartbeat Renewal<br/>hSetEx renews TTL to 45s]
-        AUTO[Automatic Expiration<br/>
Redis removes expired entries] - end - - MASTER --> LUA1 - MASTER --> LUA2 - MASTER --> LUA3 - HEARTBEAT --> LUA1 - HEARTBEAT --> LUA2 - HEARTBEAT --> LUA4 - - PROCESSES --> HSETEX1 - PROCESSES --> HSETEX2 - PROCESSES --> AUTO -``` - -## Method Call Flow Analysis - -### Election and Role Transition Flow - -```mermaid -sequenceDiagram - participant App as Application - participant ME as MasterElection - participant R as Redis - participant Dep as Dependent Services - - Note over App,Dep: Election Initialization - - App->>+ME: start() - ME->>+ME: attemptElection() - ME->>+ME: emit('election-started') - - ME->>+R: Lua Script: acquireMasterLock() - - alt Lock acquired successfully - R-->>-ME: Success (1) - ME->>+ME: becomeMaster() - ME->>+ME: startHeartbeat() - every 10s - ME->>+ME: startSlaveManagement() - ME->>+Dep: emit('master-acquired') - ME->>+ME: emit('election-completed', true) - else Lock acquisition failed - R-->>-ME: Failed (0) - ME->>+ME: becomeSlave() - ME->>+R: hSet(slaves, nodeId, slaveInfo) - ME->>+ME: startPeriodicCheck() - every 5s - ME->>+Dep: emit('election-completed', false) - end - - Note over App,Dep: Heartbeat and Monitoring Loop - - loop Every 10 seconds (Master) / 5 seconds (Slave) - alt Process is Master - ME->>+R: Lua Script: renewMasterLock() - alt Renewal successful - R-->>-ME: Success (1) - ME->>+ME: Continue as master - else Renewal failed - R-->>-ME: Failed (0) - ME->>+ME: becomeSlave() - ME->>+Dep: emit('master-lost') - ME->>+ME: attemptElection() after delay - end - else Process is Slave - ME->>+R: Lua Script: checkMasterExists() - alt Master exists and healthy - R-->>-ME: Master found (1) - ME->>+ME: Continue monitoring - else No master or stale - R-->>-ME: No master (0) - ME->>+ME: attemptElection() with random delay - end - end - end -``` - -### Process Registration and TTL Management Flow - -```mermaid -sequenceDiagram - participant P as Process - participant ME as MasterElection - participant R as Redis - participant Auto as Redis TTL - - Note over P,Auto: Process Registration with TTL - - P->>+ME: registerProcess(processInfo) - - ME->>+R: hSetEx(processes, processId, processInfo, {EX: 45}) - Note over R: Entry set with 45 second TTL - R-->>-ME: Registration confirmed - - ME->>+R: hSet(channels, processId, channelName) - R-->>-ME: Channel mapping stored - - alt Process is master - ME->>+R: set(master_process, processInfo) - R-->>-ME: Master process info stored - end - - ME-->>-P: Registration complete - - Note over P,Auto: Heartbeat Loop (Every 10s) - - loop Every 10 seconds - P->>+ME: updateProcessHeartbeat(processId) - - ME->>+R: hGet(processes, processId) - R-->>-ME: Current process data - - ME->>+ME: Update lastSeen timestamp - - ME->>+R: hSetEx(processes, processId, updatedInfo, {EX: 45}) - Note over R: TTL renewed to 45 seconds - R-->>-ME: Heartbeat recorded - - ME-->>-P: Heartbeat updated - end - - Note over P,Auto: Automatic TTL Expiration (No heartbeat) - - Note over Auto: 45 seconds pass without heartbeat - Auto->>Auto: Process entry automatically expires - Auto->>R: Remove expired entry from hash - - Note over P,Auto: Other processes detect absence - - P->>+ME: getAllProcesses() - ME->>+R: hGetAll(processes) - R-->>-ME: Only active processes returned - Note over ME: Expired process not included - ME-->>-P: Updated process list -``` - -## System Architecture Diagrams - -### Master Election Cluster Architecture - -```mermaid -graph TB - subgraph "Backend Process Cluster" - M[Master Process
Elected Leader<br/>🏆]
-        S1[Slave Process 1<br/>Follower]
-        S2[Slave Process 2<br/>Follower]
-        S3[Slave Process N<br/>Follower]
-    end
-
-    subgraph "Redis Coordination Layer"
-        R[(Redis Server)]
-        subgraph "Election Keys"
-            MK[master-election:master<br/>Lock with TTL]
-            HK[master-election:heartbeat<br/>Timestamp]
-        end
-        subgraph "Process Registry (TTL)"
-            PK[master-election:processes<br/>Hash with per-entry TTL]
-            CK[master-election:channels<br/>Process→Channel mapping]
-        end
-        subgraph "Slave Management"
-            SK[master-election:slaves<br/>Slave registration data]
-        end
-    end
-
-    subgraph "Dependent Services"
-        CAM[Camera Module<br/>MasterSlaveWorkerCluster]
-        DS[Display Service<br/>WebSocket Cluster]
-        OTHER[Other Services<br/>...]
-    end
-
-    M ===|Master Lock<br/>Heartbeat Every 10s| MK
-    M ===|Timestamp Update| HK
-    M ===|TTL Registration<br/>Heartbeat Renewal| PK
-
-    S1 <-->|Monitor Master<br/>Every 5s| R
-    S2 <-->|Monitor Master<br/>Every 5s| R
-    S3 <-->|Monitor Master<br/>Every 5s| R
-
-    S1 ===|Slave Registration<br/>Heartbeat Every 5s| SK
-    S2 ===|Slave Registration<br/>Heartbeat Every 5s| SK
-    S3 ===|Slave Registration<br/>Heartbeat Every 5s| SK
-
-    M -.->|master-acquired<br/>slave-registered<br/>slave-removed| CAM
-    M -.->|Role transition events| DS
-    M -.->|Coordination events| OTHER
-
-    S1 -.->|election-completed<br/>
master-lost| CAM - S2 -.->|Election events| DS - S3 -.->|Status events| OTHER -``` - -### TTL-Based Cleanup Architecture - -```mermaid -graph TB - subgraph "Process Lifecycle with TTL" - START[Process Starts] --> REG[Register with 45s TTL] - REG --> ACTIVE[Process Active] - - ACTIVE --> HB{Heartbeat?} - HB -->|Every 10s| RENEW[Renew TTL to 45s] - HB -->|Missed| COUNT[Count down TTL] - - RENEW --> ACTIVE - COUNT --> EXPIRE{TTL = 0?} - EXPIRE -->|No| COUNT - EXPIRE -->|Yes| CLEANUP[Redis Auto-Remove] - - CLEANUP --> DETECT[Other Processes Detect] - DETECT --> REBALANCE[Trigger Rebalancing] - end - - subgraph "Traditional Manual Cleanup vs TTL" - subgraph "❌ Manual Cleanup Problems" - RACE[Race Conditions] - LEAK[Memory Leaks] - STALE[Stale Data] - COMPLEX[Complex Logic] - end - - subgraph "✅ TTL-Based Benefits" - AUTO[Automatic Cleanup] - RELIABLE[Reliable Expiration] - SIMPLE[Simple Implementation] - SELF[Self-Healing] - end - end - - subgraph "TTL Management Operations" - HSETEX[hSetEx(key, field, value, {EX: 45})] - RENEWAL[Heartbeat renews TTL automatically] - EXPIRY[Redis removes expired entries] - - HSETEX --> RENEWAL - RENEWAL --> EXPIRY - EXPIRY --> HSETEX - end -``` - -### Election Timing and Coordination - -```mermaid -gantt - title Master Election Timeline - dateFormat X - axisFormat %s - - section Master Lock - Master Lock TTL (30s) :milestone, m1, 0, 0s - Lock Renewal (10s) :10, 20s - Lock Renewal (10s) :20, 30s - Lock Expires :milestone, m2, 30, 30s - - section Process TTL - Process Registration (45s) :milestone, p1, 0, 0s - Heartbeat Renewal (10s) :10, 20s - Heartbeat Renewal (10s) :20, 30s - Heartbeat Renewal (10s) :30, 40s - Process Expires :milestone, p2, 45, 45s - - section Election Events - Initial Election :milestone, e1, 0, 0s - Slave Monitoring (5s) :5, 10s - Slave Monitoring (5s) :10, 15s - Master Failure Detected :milestone, e2, 30, 30s - New Election Started :32, 35s - New Master Elected :milestone, e3, 35, 35s -``` - -## Event System Architecture - -### Event Flow and Dependencies - -```mermaid -graph TD - subgraph "MasterElection Events" - ES[election-started] --> EA{Election Attempt} - EA -->|Success| MA[master-acquired] - EA -->|Failed| EC[election-completed(false)] - - MA --> HB[Start Heartbeat Loop] - EC --> MON[Start Master Monitoring] - - HB --> RENEW{Heartbeat Success?} - RENEW -->|Success| CONT[Continue as Master] - RENEW -->|Failed| ML[master-lost] - - ML --> STEP[Step Down to Slave] - STEP --> MON - - CONT --> HB - MON --> CHECK{Master Missing?} - CHECK -->|Yes| ES - CHECK -->|No| MON - end - - subgraph "Slave Management Events" - SR[slave-registered] --> UP[Update Assignments] - SREM[slave-removed] --> CLEAN[Cleanup Assignments] - - UP --> NOTIFY[Notify Services] - CLEAN --> REBAL[Rebalance Load] - end - - subgraph "Error Handling Events" - ERR[error] --> LOG[Log Error Details] - LOG --> RECOVER[Attempt Recovery] - RECOVER --> ES - end - - subgraph "External Service Integration" - MA -.->|becomeMaster()| CAMERA[Camera Module] - ML -.->|becomeSlave()| CAMERA - SR -.->|slaveJoined()| CAMERA - SREM -.->|slaveLeft()| CAMERA - - MA -.->|Master role| DISPLAY[Display Service] - ML -.->|Slave role| DISPLAY - - MA -.->|Coordinate| OTHER[Other Services] - ML -.->|Follow| OTHER - end -``` - -### Event Sequence Patterns - -#### Master Failure and Recovery Pattern - -```mermaid -sequenceDiagram - participant M as Master Process - participant S1 as Slave 1 - participant S2 as Slave 2 - participant R as Redis - participant Svc as Dependent Services - - Note over 
M,Svc: Normal Operation - M->>R: Heartbeat renewal every 10s - S1->>R: Monitor master every 5s - S2->>R: Monitor master every 5s - - Note over M,Svc: Master Failure - M--XM: Process crashes - - Note over R: Master lock expires (30s) - R->>R: Lock TTL expires - - Note over S1,S2: Slaves detect master failure - S1->>R: checkMasterExists() → false - S2->>R: checkMasterExists() → false - - Note over S1,S2: Election race with random delay - S1->>S1: Random delay 1.2s - S2->>S2: Random delay 0.8s - - S2->>R: attemptElection() first - R->>S2: Success - became master - S2->>S2: emit('master-acquired') - S2->>Svc: becomeMaster() event - - S1->>R: attemptElection() second - R->>S1: Failed - master exists - S1->>S1: Continue as slave - - Note over S2,Svc: New master operational - S2->>R: Start heartbeat renewal - Svc->>S2: Acknowledge new master -``` - -## Configuration and Tuning - -### Timing Configuration - -```typescript -// MasterElection constructor parameters -interface MasterElectionConfig { - lockName: string = 'master-election'; // Redis key prefix - lockTtl: number = 30000; // Master lock TTL (30 seconds) - heartbeatInterval: number = 10000; // Master heartbeat interval (10 seconds) - checkInterval: number = 5000; // Slave monitoring interval (5 seconds) - identifier: string = 'cms-backend'; // Human-readable process identifier -} - -// TTL Configuration -const PROCESS_TTL_SECONDS = 45; // Process registration TTL -const SLAVE_TIMEOUT_MS = 15000; // Slave cleanup threshold (3x heartbeat) -const ELECTION_RANDOM_DELAY_MAX = 2000; // Max random delay to prevent collisions -``` - -### Redis Key Structure - -```typescript -// Election and coordination keys -const REDIS_KEYS = { - // Master election coordination - master: `${lockName}:master`, // Current master ID with TTL - heartbeat: `${lockName}:heartbeat`, // Master heartbeat timestamp - masterProcess: `${lockName}:master_process`, // Master ProcessInfo JSON - - // Process registry with TTL - processes: `${lockName}:processes`, // Hash: processId → ProcessInfo (TTL per entry) - channels: `${lockName}:channels`, // Hash: processId → channelName - - // Slave management - slaves: `${lockName}:slaves`, // Hash: nodeId → SlaveNode -}; - -// TTL settings -const TTL_CONFIG = { - masterLock: 30, // seconds - Master lock expiration - processEntry: 45, // seconds - Process registration TTL - heartbeatRenewal: 10, // seconds - How often to renew heartbeats - slaveMonitoring: 5, // seconds - How often slaves check master -}; -``` - -### Performance Characteristics - -#### Scalability Metrics -- **Election Speed**: < 100ms for uncontested election -- **Failover Time**: < 5 seconds from master failure to new election -- **Process Registration**: < 10ms per process registration -- **TTL Cleanup**: Automatic, no performance impact on application - -#### Resource Usage -- **Memory**: O(n) where n = number of backend processes -- **Redis Operations**: Atomic Lua scripts prevent race conditions -- **Network**: Minimal - only heartbeats and election attempts -- **CPU**: Negligible overhead for coordination operations - -#### Reliability Guarantees -- **Split-Brain Prevention**: Atomic Lua scripts ensure single master -- **Automatic Recovery**: TTL-based cleanup handles all failure scenarios -- **Event Consistency**: All role transitions emit events for service coordination -- **State Persistence**: Process registry survives Redis restarts - -## Public Interface Specification - -The MasterElection service provides a clean, event-driven interface for distributed 
coordination across backend processes. - -### Primary Interface: MasterElection Class - -#### Core Lifecycle Methods - -```typescript -/** - * Initialize and start the master election process - * @returns Promise - Resolves when election completes - */ -public async start(): Promise - -/** - * Stop master election and cleanup resources - * @returns Promise - Resolves when cleanup completes - */ -public async stop(): Promise - -/** - * Wait for election to complete with timeout - * @param timeoutMs - Maximum time to wait (default: 30000) - * @returns Promise - true if became master, false if slave - */ -public async waitForElectionComplete(timeoutMs: number = 30000): Promise -``` - -#### Status and Information Methods - -```typescript -/** - * Check if this process is currently the master - * @returns boolean - true if master, false if slave - */ -public getIsMaster(): boolean - -/** - * Get this process's unique node identifier - * @returns string - UUID-based node identifier - */ -public getNodeId(): string - -/** - * Get this process's human-readable identifier - * @returns string - Process identifier (e.g., 'cms-backend') - */ -public getIdentifier(): string - -/** - * Get or set process metadata for coordination - * @param metadata - Optional metadata to set - * @returns Record - Current metadata - */ -public setMetadata(metadata: Record): void -public getMetadata(): Record -``` - -#### Process Registry Methods - -```typescript -/** - * Register a process in the distributed registry with TTL - * @param processInfo - Process information including role and capabilities - * @returns Promise - */ -public async registerProcess(processInfo: ProcessInfo): Promise - -/** - * Update process heartbeat to renew TTL (45 seconds) - * @param processId - Process identifier to update - * @returns Promise - */ -public async updateProcessHeartbeat(processId: string): Promise - -/** - * Get all currently registered processes (auto-filtered by TTL) - * @returns Promise - Array of active processes - */ -public async getAllProcesses(): Promise - -/** - * Get current master process information - * @returns Promise - Master process or null if none - */ -public async getMasterProcess(): Promise - -/** - * Get processes filtered by role - * @param role - 'master' or 'slave' - * @returns Promise - Processes with specified role - */ -public async getProcessesByRole(role: 'master' | 'slave'): Promise -``` - -#### Slave Management Methods - -```typescript -/** - * Get all registered slave nodes - * @returns Promise - Array of active slaves - */ -public async getSlaves(): Promise - -/** - * Get specific slave node information - * @param nodeId - Slave node identifier - * @returns Promise - Slave info or null if not found - */ -public async getSlave(nodeId: string): Promise - -/** - * Get count of registered slave nodes - * @returns Promise - Number of active slaves - */ -public async getSlaveCount(): Promise -``` - -### Event System Interface - -#### Event Registration - -```typescript -// Type-safe event registration -masterElection.on('master-acquired', () => { - // This process became the master - console.log('Became master - start coordinating resources'); -}); - -masterElection.on('master-lost', () => { - // This process lost master status - console.log('Lost master status - step down to slave role'); -}); - -masterElection.on('election-completed', (isMaster: boolean) => { - // Election finished - role determined - console.log(`Election completed - role: ${isMaster ? 
'MASTER' : 'SLAVE'}`); -}); - -masterElection.on('slave-registered', (slave: SlaveNode) => { - // New backend process joined cluster - console.log(`New slave joined: ${slave.nodeId}`); -}); - -masterElection.on('slave-removed', (nodeId: string) => { - // Backend process left cluster (TTL expired) - console.log(`Slave removed: ${nodeId}`); -}); - -masterElection.on('error', (error: Error) => { - // Election or coordination error occurred - console.error('Master election error:', error); -}); -``` - -#### Event Timing Guarantees - -- **master-acquired**: Emitted immediately after successful lock acquisition -- **master-lost**: Emitted immediately after failed lock renewal -- **election-completed**: Emitted after initial election resolves (master or slave) -- **slave-registered**: Emitted when new slave joins (master only) -- **slave-removed**: Emitted when slave TTL expires (master only) -- **error**: Emitted on Redis connection issues or election failures - -### Usage Patterns - -#### Basic Initialization and Coordination - -```typescript -import { initialize, getMasterElection } from '~/services/MasterElection'; - -// Initialize master election with custom settings -await initialize( - 'cms-cluster', // lockName - Redis key prefix - 30000, // lockTtl - Master lock TTL (30s) - 10000, // heartbeatInterval - Master heartbeat (10s) - 5000, // checkInterval - Slave monitoring (5s) - 'cms-backend-prod' // identifier - Human-readable name -); - -// Get election instance for event handling -const masterElection = getMasterElection(); - -// Wait for initial election to complete -const isMaster = await masterElection.waitForElectionComplete(); -console.log(`Process started as: ${isMaster ? 'MASTER' : 'SLAVE'}`); -``` - -#### Service Integration Pattern - -```typescript -// Camera module integration example -class CameraClusterService { - private masterElection: MasterElection; - - constructor() { - this.masterElection = getMasterElection(); - this.setupElectionHandlers(); - } - - private setupElectionHandlers() { - // Handle master role transitions - this.masterElection.on('master-acquired', () => { - this.becomeMaster(); - }); - - this.masterElection.on('master-lost', () => { - this.becomeSlave(); - }); - - // Handle cluster membership changes - this.masterElection.on('slave-registered', (slave) => { - this.handleSlaveJoined(slave); - }); - - this.masterElection.on('slave-removed', (nodeId) => { - this.handleSlaveLeft(nodeId); - }); - } - - private async becomeMaster() { - console.log('Camera service: Becoming master'); - - // Connect to all Python ML workers - await this.connectToAllWorkers(); - - // Start managing cluster assignments - this.startClusterManagement(); - - // Begin rebalancing subscriptions - this.startRebalancing(); - } - - private async becomeSlave() { - console.log('Camera service: Becoming slave'); - - // Disconnect from Python workers (master-only) - await this.disconnectFromWorkers(); - - // Stop cluster management - this.stopClusterManagement(); - - // Start listening for routed messages - this.startSlaveMessageHandling(); - } -} -``` - -#### Process Registration with Custom Capabilities - -```typescript -// Register this process with specific capabilities -await masterElection.registerProcess({ - processId: masterElection.getNodeId(), - nodeId: masterElection.getNodeId(), - role: masterElection.getIsMaster() ? 
'master' : 'slave', - channelName: `worker:slave:${masterElection.getNodeId()}`, - lastSeen: new Date().toISOString(), - capabilities: { - canProcessDetections: true, // Can handle AI detection callbacks - maxSubscriptions: 100, // Maximum camera subscriptions - preferredWorkload: 80 // Preferred load percentage (0-100) - } -}); - -// Start heartbeat loop to maintain registration -setInterval(async () => { - await masterElection.updateProcessHeartbeat(masterElection.getNodeId()); -}, 10000); // Every 10 seconds -``` - -#### Cluster Monitoring and Status - -```typescript -// Monitor cluster status and health -async function monitorClusterHealth() { - // Get all active processes (TTL-filtered automatically) - const allProcesses = await masterElection.getAllProcesses(); - console.log(`Active processes: ${allProcesses.length}`); - - // Get current master - const masterProcess = await masterElection.getMasterProcess(); - if (masterProcess) { - console.log(`Master: ${masterProcess.processId} (${masterProcess.capabilities.maxSubscriptions} max subscriptions)`); - } - - // Get all slaves - const slaves = await masterElection.getSlaves(); - console.log(`Slaves: ${slaves.length}`); - slaves.forEach(slave => { - console.log(` Slave ${slave.nodeId}: last seen ${slave.lastSeen}`); - }); - - // Check if this process is master - if (masterElection.getIsMaster()) { - console.log('This process is the master - coordinating cluster'); - } else { - console.log('This process is a slave - following master'); - } -} - -// Run monitoring every 30 seconds -setInterval(monitorClusterHealth, 30000); -``` - -#### Graceful Shutdown Pattern - -```typescript -// Graceful shutdown with proper cleanup -process.on('SIGTERM', async () => { - console.log('Shutting down master election...'); - - try { - // Stop election and cleanup resources - await masterElection.stop(); - - // Master automatically releases lock - // Process TTL will expire naturally - // Slaves will detect and trigger new election - - console.log('Master election shutdown complete'); - } catch (error) { - console.error('Error during election shutdown:', error); - } - - process.exit(0); -}); -``` - -### Error Handling and Recovery - -#### Election Failure Scenarios - -```typescript -// Handle various failure modes -masterElection.on('error', (error) => { - console.error('Master election error:', error.message); - - // Common error types: - if (error.message.includes('Redis')) { - // Redis connection issues - console.log('Redis connectivity problem - will retry automatically'); - - } else if (error.message.includes('timeout')) { - // Election timeout - console.log('Election timeout - may indicate network issues'); - - } else if (error.message.includes('lock')) { - // Lock acquisition issues - console.log('Lock contention - normal during elections'); - } - - // Service continues running - election will retry automatically -}); - -// Handle network partitions -masterElection.on('master-lost', () => { - console.log('Lost master status - likely network partition or overload'); - - // Dependent services should gracefully step down - // New election will start automatically after random delay -}); -``` - -#### Recovery Guarantees - -- **Split-Brain Prevention**: Atomic Lua scripts ensure only one master exists -- **Automatic Failover**: New elections triggered immediately when master fails -- **TTL-Based Cleanup**: Processes automatically removed when heartbeats stop -- **State Recovery**: Process registry rebuilds automatically from active heartbeats -- **Event 
Consistency**: All role changes emit events for service coordination - -### Integration with Dependent Services - -The MasterElection service is designed to coordinate multiple backend services that need distributed leadership: - -#### Camera Module Integration -- Master: Connects to Python ML workers, manages subscriptions -- Slaves: Process routed detection messages, forward commands - -#### Display WebSocket Cluster -- Master: Manages WebSocket connection assignments across processes -- Slaves: Handle assigned display connections, route messages - -#### Database Migration Coordination -- Master: Executes database migrations and schema changes -- Slaves: Wait for master to complete before proceeding - -This specification provides a comprehensive understanding of the MasterElection service's distributed coordination capabilities and integration patterns for multi-process backend systems. \ No newline at end of file diff --git a/docs/WorkerConnection.md b/docs/WorkerConnection.md deleted file mode 100644 index b11ba61..0000000 --- a/docs/WorkerConnection.md +++ /dev/null @@ -1,1498 +0,0 @@ -# Worker Connection Architecture Specification - Pure Declarative State Management - -## Overview - -The Camera Module implements a pure VMware DRS-like declarative architecture for managing connections to Python ML workers. This system uses the database as the single source of truth for desired subscription state, with automatic regeneration and reconciliation providing intelligent camera management, real-time object detection, and AI-powered content selection with automatic load balancing capabilities. - -**Key Architectural Principle**: Database mutations trigger complete state regeneration rather than incremental updates, ensuring consistency and eliminating complex state synchronization issues. - -## Architecture Components - -### Two-Cluster System - -The system consists of two distinct but coordinated clusters: - -1. **Backend Process Cluster**: Multiple CMS backend processes with leader election -2. **Worker Cluster**: Python ML workers for object detection processing - -### Master-Slave WebSocket Architecture - -- **Master Process**: Single elected backend process that maintains WebSocket connections to Python workers -- **Slave Processes**: All other backend processes that handle message routing and processing -- **Message Routing**: Master forwards worker messages to assigned slaves via Redis pub/sub channels -- **MasterElection Integration**: Automated master/slave role management with event-driven transitions -- **Seamless Scaling**: Backend processes can be added/removed without affecting WebSocket connections - -## Core Components - -### DetectorCluster -`cms-backend/modules/camera/services/DetectorCluster.ts` - -Primary interface for camera operations that abstracts the underlying distributed architecture. - -**Key Responsibilities:** -- Routes camera subscription requests through the cluster -- Manages detection callback registration and event emission -- Bridges CameraService with underlying MasterSlaveWorkerCluster -- Provides unified API regardless of master/slave status - -### MasterSlaveWorkerCluster -`cms-backend/modules/camera/services/MasterSlaveWorkerCluster.ts` - -Core distributed cluster implementation that handles declarative state management and worker assignment reconciliation. 
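-
-A minimal sketch (illustrative only, not the module's actual implementation) of the desired-vs-actual diff this class reconciles. It uses the `DesiredCameraSubscription` and `ActualCameraSubscription` structures defined later in this document; the helper name `planReconciliation` is hypothetical.
-
-```typescript
-// Hypothetical helper: compute which subscriptions to start and stop so the
-// workers converge on the desired state regenerated from the database.
-interface ReconciliationPlan {
-  toSubscribe: DesiredCameraSubscription[];   // desired but not yet running
-  toUnsubscribe: ActualCameraSubscription[];  // running but no longer desired
-}
-
-function planReconciliation(
-  desired: Map<string, DesiredCameraSubscription>,
-  actual: Map<string, ActualCameraSubscription>
-): ReconciliationPlan {
-  const toSubscribe: DesiredCameraSubscription[] = [];
-  const toUnsubscribe: ActualCameraSubscription[] = [];
-
-  // Present in the regenerated desired state but absent on workers -> needs assignment.
-  for (const [id, sub] of desired) {
-    if (!actual.has(id)) toSubscribe.push(sub);
-  }
-
-  // Running on a worker but no longer derived from the database -> tear down.
-  for (const [id, sub] of actual) {
-    if (!desired.has(id)) toUnsubscribe.push(sub);
-  }
-
-  return { toSubscribe, toUnsubscribe };
-}
-```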
- -**Master Mode Responsibilities:** -- Maintains WebSocket connections to all Python workers -- Manages desired vs actual subscription state separation -- Implements VMware DRS-like global rebalancing algorithm -- Processes automatic reconciliation every 30 seconds -- Responds to slave join/leave events from MasterElection -- Generates fresh pre-signed model URLs for worker assignments - -**Slave Mode Responsibilities:** -- Submits desired subscription state changes to master -- Processes detection results routed from master -- Event-driven role transitions managed by MasterElection -- No direct worker management (delegated to master) - -### DetectorConnection -`cms-backend/modules/camera/services/DetectorConnection.ts` - -Individual WebSocket connection handler for Python workers. - -**Key Features:** -- Connection lifecycle management (connect, disconnect, reconnect) -- Exponential backoff reconnection with 10-second intervals -- Subscription state management and restoration after reconnection -- Real-time heartbeat monitoring with 10-second timeout -- Resource usage tracking (CPU, memory, GPU) - -## Data Structures - -### WorkerConnectionState -```typescript -interface WorkerConnectionState { - url: string; // Worker WebSocket URL - processId: string; // Backend process managing this worker - online: boolean; // Connection status - cpuUsage: number | null; // Worker CPU utilization - memoryUsage: number | null; // Worker memory usage - gpuUsage: number | null; // Worker GPU utilization - gpuMemoryUsage: number | null; // Worker GPU memory usage - subscriptionCount: number; // Active camera subscriptions - subscriptions: string[]; // List of subscription identifiers - lastHeartbeat: string; // Last heartbeat timestamp - connectedAt: string; // Connection established timestamp -} -``` - -### DesiredCameraSubscription -```typescript -interface DesiredCameraSubscription { - subscriptionIdentifier: string; // Format: ${displayId};${cameraId} - rtspUrl: string; // Camera RTSP stream URL - modelId: number; // AI model database ID - modelName: string; // AI model identifier - createdAt: string; // Subscription creation timestamp - - // Snapshot configuration - snapshotUrl?: string; // Optional snapshot endpoint URL - snapshotInterval?: number; // Snapshot interval in milliseconds - - // Image cropping parameters - cropX1?: number; // Crop region top-left X - cropY1?: number; // Crop region top-left Y - cropX2?: number; // Crop region bottom-right X - cropY2?: number; // Crop region bottom-right Y -} -``` - -### ActualCameraSubscription -```typescript -interface ActualCameraSubscription { - subscriptionIdentifier: string; // Format: ${displayId};${cameraId} - assignedWorkerUrl: string; // Worker handling this subscription - modelUrl: string; // AI model presigned URL (1hr TTL) - status: 'active' | 'pending' | 'failed' | 'recovering'; - assignedAt: string; // Worker assignment timestamp - lastSeen: string; // Last activity timestamp -} -``` - -### SlaveState -```typescript -interface SlaveState { - slaveId: string; // Unique slave identifier (process ID) - processId: string; // Backend process ID (same as slaveId) - online: boolean; // Always true (maintained by MasterElection) - workload: number; // Number of assigned workers (calculated) - lastSeen: string; // Last heartbeat from MasterElection - capabilities?: Record; // Metadata from MasterElection -} -``` - -### DetectorWorkerCommand -```typescript -interface DetectorWorkerCommand { - type: DetectorWorkerCommandType; - payload?: { - 
subscriptionIdentifier: string; - rtspUrl: string; - snapshotUrl?: string; - snapshotInterval?: number; - modelUrl: string; - modelName: string; - modelId: number; - cropX1?: number; - cropY1?: number; - cropX2?: number; - cropY2?: number; - }; -} - -enum DetectorWorkerCommandType { - SUBSCRIBE = "subscribe", - UNSUBSCRIBE = "unsubscribe", - REQUEST_STATE = "requestState", - PATCH_SESSION_RESULT = "patchSessionResult", - SET_SESSION_ID = "setSessionId" -} -``` - -### ImageDetectionResponse -```typescript -interface ImageDetectionResponse { - subscriptionIdentifier: string; - timestamp: Date; - data: { - detection: { - carModel?: string; - carBrand?: string; - carYear?: number; - bodyType?: string; - licensePlateText?: string; - licensePlateType?: string; - }; - modelId: number; - modelName: string; - }; -} -``` - -## Redis Data Architecture - -### Persistent Storage Keys -- `worker:connections` - Worker connection states and health metrics -- `worker:assignments` - Worker-to-slave assignment mappings -- `worker:desired_subscriptions` - Desired camera subscription state (user intent) -- `worker:actual_subscriptions` - Actual worker subscription assignments (system state) -- `master-election:slaves` - Slave registration and heartbeat (managed by MasterElection) - -### Communication Channels -- `worker:slave:{slaveId}` - Individual slave message routing channels -- `worker:messages:upstream` - Worker-to-master communication channel (currently unused) -- `worker:assignments:changed` - Assignment change broadcast notifications -- `worker:master:commands` - Database change notification channel (slaves → master) - -### Data Persistence Strategy -All Redis data uses **manual cleanup only** (no TTL) to ensure: -- Reliable state recovery after process restarts -- Consistent subscription persistence across failovers -- Predictable cleanup during planned maintenance -- Debug visibility into system state history - -## Pure Declarative Architecture (VMware DRS-like) - -### Concept Overview -The system implements a pure declarative approach similar to VMware Distributed Resource Scheduler (DRS), where: -- **Database**: Single source of truth for desired state (Display+Camera+Playlist combinations) -- **Actual State**: What subscriptions are currently running on workers (stored in `worker:actual_subscriptions`) -- **Regeneration**: Master regenerates complete desired state from database on every change notification -- **Reconciliation**: Master continuously reconciles desired vs actual state via global rebalancing - -### Pure Declarative Benefits -- **Database as Truth**: Desired state always derived fresh from database, eliminating state synchronization issues -- **Zero Incremental Updates**: No complex state management, just "regenerate everything on change" -- **Automatic Recovery**: System heals itself by comparing database state vs actual worker state -- **Load Balancing**: Global optimization across all workers and subscriptions -- **Fault Tolerance**: Desired state survives all failures since it's always derived from database -- **Simplicity**: Database mutations just trigger regeneration - no complex command protocols - -### Pure Declarative Flow -```typescript -// Triggered by any database change -async handleDatabaseChange(changeType: string, entityId: string) { - // 1. Any process detects database change - await triggerSubscriptionUpdate(changeType, entityId); - - // 2. 
Master receives regeneration request - async handleMasterCommand(message) { - if (data.type === 'regenerate_subscriptions') { - await regenerateDesiredStateFromDatabase(); - } - } - - // 3. Master regenerates complete desired state from database - async regenerateDesiredStateFromDatabase() { - const activeDisplays = await db.display.findMany({ - where: { - AND: [ - { cameraIdentifier: { not: null } }, - { playlistId: { not: null } } - ] - }, - include: { camera: true, playlist: { include: { model: true } } } - }); - - // Generate fresh desired subscriptions from database - await storeDesiredSubscriptions(generateFromDisplays(activeDisplays)); - - // Trigger reconciliation - await rebalanceCameraSubscriptions(); - } - - // 4. Reconciliation (same VMware DRS algorithm) - async rebalanceCameraSubscriptions() { - const desired = await getDesiredSubscriptions(); // Fresh from database - const actual = await getActualSubscriptions(); // Current worker state - - // Find and fix differences using load balancing - await reconcileDifferences(desired, actual); - } -} - -// VMware DRS-like worker selection (unchanged) -function findBestWorkerVMwareDRS(workers, currentLoads) { - return workers - .map(worker => ({ - worker, - score: (currentLoads.get(worker.url) * 0.4) + // 40% load balance - (worker.cpuUsage * 0.35) + // 35% CPU usage - (worker.memoryUsage * 0.25) // 25% memory usage - })) - .sort((a, b) => a.score - b.score)[0].worker; // Lower score = better -} -``` - -### Simplified Reconciliation Flow -1. **Database Change**: Any process modifies database (Display, Camera, Playlist, Model) -2. **Trigger Notification**: Process sends `regenerate_subscriptions` to `worker:master:commands` -3. **Complete Regeneration**: Master queries database for all active Display+Camera+Playlist combinations -4. **Desired State Creation**: Master generates fresh desired subscriptions from database query results -5. **Diff Analysis**: Master compares fresh desired state vs current actual state on workers -6. **Global Reconciliation**: Master applies VMware DRS algorithm to reconcile differences -7. **Worker Commands**: Master sends subscription/unsubscription commands to workers -8. **State Update**: Master updates actual subscription state in Redis - -### Key Simplifications vs Previous Architecture -- **No Incremental State Management**: No complex tracking of individual subscription changes -- **No State Synchronization Issues**: Desired state always freshly derived from database -- **No Complex Command Protocols**: Only one command type: `regenerate_subscriptions` -- **No Partial Update Bugs**: Complete regeneration eliminates edge cases and race conditions -- **Zero Database-Redis Divergence**: Database is always the authoritative source -- **Simpler Service Layer**: Services just update database + trigger, no subscription logic - -## Class Responsibilities Overview - -### Core Class Functions - -| Class | Primary Responsibility | Key Functions | Process Type | -|-------|----------------------|---------------|--------------| -| **DetectorCluster** | Public API facade and event management | • `subscribeToCamera()` - Legacy interface (triggers regeneration)
• `addDetectionListener()` - Callback registration<br/>• `getState()` - Cluster monitoring<br/>• Event emission to external services | Both Master & Slave |
-| **MasterSlaveWorkerCluster** | Pure declarative cluster coordination | **Master**: `regenerateDesiredStateFromDatabase()`, `rebalanceCameraSubscriptions()`, `connectToAllWorkers()`<br/>**Slave**: Minimal role - just routes detection messages<br/>**Both**: `handleDetectionMessage()` for callbacks | Both (different roles) |
-| **DetectorConnection** | Individual worker WebSocket management | • `initialize()` - WebSocket connection setup<br/>• `subscribeToCamera()` - Send subscription to worker<br/>• `handleImageDetectionResponse()` - Process AI results<br/>• `resubscribeAll()` - Restore subscriptions after reconnect | Master Only |
-| **CameraService** | Database operations + trigger notifications | • `addCamera()` - Database create + trigger regeneration<br/>• `updateCamera()` - Database update + trigger regeneration<br/>• `removeCamera()` - Database delete + trigger regeneration | Both Master & Slave |
-| **DisplayService** | Database operations + trigger notifications | • `registerDisplay()` - Database create + trigger regeneration<br/>• `updateDisplay()` - Database update + trigger regeneration<br/>
• `deleteDisplay()` - Database delete + trigger regeneration | Both Master & Slave | -| **SubscriptionTrigger** | Simple notification system | • `triggerSubscriptionUpdate()` - Send regeneration request to master | Both Master & Slave | - -## Object Relationship Diagrams - -### Core Class Structure and Methods - -```mermaid -classDiagram - class CameraService { - +addCamera(identifier, rtspUrl) - +removeCamera(identifier) - +resubscribeCamera(identifier) - +getCameras() - +updateCamera(...) - -processDetection(data) - } - - class DetectorCluster { - +initialize() - +subscribeToCamera(...) - +unsubscribeFromCamera(subscriptionId) - +unsubscribeFromAllWithCameraID(cameraId) - +getState() - +addDetectionListener(subscriptionId, callback) - +addGlobalDetectionListener(callback) - -handleWorkerDetection(data) - } - - class MasterSlaveWorkerCluster { - +initialize() - +subscribeToCamera(...) - +storeCameraSubscription(subscription) - +getClusterState() - +shutdown() - -connectToAllWorkers() [MASTER] - -rebalanceCameraSubscriptions() [MASTER] - -triggerRebalancing() [MASTER] - -becomeMaster() - -becomeSlave() - -setupMasterElectionListeners() - } - - class DetectorConnection { - +initialize() - +subscribeToCamera(...) - +unsubscribeFromCamera(subscriptionId) - +getCameraImage(cameraId) - +setSessionId(displayId, sessionId) - +getState() - -connect() - -resubscribeAll() - -handleImageDetectionResponse(data) - -scheduleReconnect() - } - - CameraService --> DetectorCluster : "subscribeToCamera()\ngetState()" - DetectorCluster --> MasterSlaveWorkerCluster : "initialize()\nstoreCameraSubscription()" - MasterSlaveWorkerCluster --> DetectorConnection : "[MASTER] creates connections" -``` - -### Direct Function Call Relationships - -```mermaid -graph TD - API[API Routes] --> CS[CameraService] - CS --> |subscribeToCamera
getState<br/>unsubscribeFromAllWithCameraID| DC[DetectorCluster]
- DC --> |initialize<br/>storeCameraSubscription<br/>getClusterState<br/>
subscribeToCamera| MSC[MasterSlaveWorkerCluster] - - subgraph "Master Process Only" - MSC --> |connectToAllWorkers
creates connections| CONN[DetectorConnection] - CONN --> |WebSocket calls| PW[Python ML Worker] - end - - ME[MasterElection] --> |getIsMaster
getNodeId
getSlaves| MSC - WL[WorkerLogger] --> |attachToDetectorCluster| DC - - classDef masterOnly fill:#ffcccc - classDef external fill:#ffffcc - - class CONN masterOnly - class PW external - class API external -``` - -### Event-Driven Communication - -```mermaid -graph LR - subgraph "Internal Events" - MSC[MasterSlaveWorkerCluster] -.-> |emit detection| DC[DetectorCluster] - MSC -.-> |emit worker:online
emit worker:offline| DC - DC -.-> |emit worker:detection_result
emit worker:online
emit worker:offline| CS[CameraService] - DC -.-> |emit events| WL[WorkerLogger] - ME[MasterElection] -.-> |master-acquired
master-lost
slave-registered
slave-removed| MSC - end - - subgraph "Callback System" - CS -.-> |callback registration| DC - DC -.-> |detection callbacks| CS - end - - subgraph "WebSocket Events (Master Only)" - CONN[DetectorConnection] -.-> |handleWorkerMessage
handleWorkerOnline
handleWorkerOffline| MSC - PW[Python ML Worker] -.-> |IMAGE_DETECTION
STATE_REPORT| CONN - end - - classDef events fill:#e6f3ff - classDef callbacks fill:#fff2e6 - classDef websocket fill:#ffe6e6 - - class MSC,DC,CS,WL events - class CONN,PW websocket -``` - -### Redis Communication Patterns - -```mermaid -graph TB - subgraph "Master Process" - M[Master MasterSlaveWorkerCluster] - end - - subgraph "Slave Processes" - S1[Slave Process 1] - S2[Slave Process 2] - end - - subgraph "Redis Channels" - SC1[worker:slave:slave1] - SC2[worker:slave:slave2] - MC[worker:master:commands] - AC[worker:assignments:changed] - end - - subgraph "Redis Storage" - WC[worker:connections] - WA[worker:assignments] - WS[worker:slaves] - CS[worker:camera_subscriptions] - end - - M --> |publish detection routing| SC1 - M --> |publish detection routing| SC2 - M --> |publish assignments| AC - M --> |hSet/hGet state| WC - M --> |hSet/hGet assignments| WA - M --> |hSet/hGet subscriptions| CS - - S1 --> |publish commands| MC - S2 --> |publish commands| MC - S1 --> |hSet registration| WS - S2 --> |hSet registration| WS - - SC1 --> |subscribe| S1 - SC2 --> |subscribe| S2 - MC --> |subscribe| M - AC --> |subscribe all| S1 - AC --> |subscribe all| S2 -``` - -## Method Call Flow Analysis - -### Camera Subscription Flow (External Request → Worker) - -```mermaid -sequenceDiagram - participant API as API Routes - participant CS as CameraService - participant DB as Database - participant ST as SubscriptionTrigger - participant R as Redis - participant MSC as MasterSlaveCluster - participant CONN as DetectorConnection - participant W as Python Worker - - Note over API,W: Pure Declarative Flow - API->>+CS: POST /api/camera - CS->>+DB: db.cameraEntity.create({...}) - DB-->>-CS: Camera created - CS->>+ST: triggerSubscriptionUpdate('camera.created', id) - ST->>+R: publish(worker:master:commands, {type: 'regenerate_subscriptions', ...}) - - Note over R,MSC: Only Master Processes Commands - R->>+MSC: Master receives regeneration request - MSC->>+MSC: regenerateDesiredStateFromDatabase() - MSC->>+DB: Query all Display+Camera+Playlist combinations - DB-->>-MSC: Active display configurations - MSC->>+MSC: Generate fresh desired subscriptions - MSC->>+R: Store desired state in Redis - MSC->>+MSC: rebalanceCameraSubscriptions() - MSC->>+MSC: findBestWorkerForSubscription() - MSC->>+CONN: subscribeToCamera(subscriptionId, rtspUrl, ...) 
- CONN->>+W: WebSocket: {type: "subscribe", payload: {...}} - W-->>-CONN: WebSocket: {type: "stateReport", ...} - CONN->>-MSC: handleWorkerOnline(workerUrl) - MSC->>-R: Update actual subscription state - - Note over W,CS: Detection Processing (unchanged) - W->>CONN: Detection results - CONN->>MSC: Route to assigned slave - MSC->>CS: Detection callback - CS-->>-API: Camera added successfully -``` - -### Detection Processing Flow (Worker → External Callback) - -```mermaid -sequenceDiagram - participant W as Python Worker - participant CONN as DetectorConnection - participant MSC as MasterSlaveCluster - participant R as Redis - participant DC as DetectorCluster - participant CS as CameraService - - Note over W,CS: AI Detection Result Processing - W->>+CONN: WebSocket: {type: "imageDetection", subscriptionIdentifier, data} - CONN->>+MSC: handleWorkerMessage(ImageDetectionResponse) - - Note over MSC: Master finds assigned slave - MSC->>+MSC: findWorkerForSubscription(subscriptionId) - MSC->>+R: hGet(worker:assignments, workerUrl) - MSC->>+R: publish(worker:slave:{slaveId}, {type: 'detection', ...}) - - Note over R: Redis routes to assigned slave - R-->>+MSC: Slave receives detection message - MSC->>+MSC: handleDetectionMessage(message) - MSC->>+DC: emit('detection', detectionData) - - Note over DC: Process detection and trigger callbacks - DC->>+DC: handleWorkerDetection(data) - DC->>+DC: detectionListeners.get(subscriptionId).forEach(callback) - DC->>+CS: callback(detectionData) - DC->>+DC: emit('worker:detection_result', {url, cameraId, detections}) - - Note over CS: External service processes detection - CS->>+CS: processDetection(data) - CS-->>CS: updateAnalytics(), triggerDecisionTrees() -``` - -### Master Election and Failover Flow - -```mermaid -sequenceDiagram - participant ME as MasterElection - participant MSC1 as MasterSlaveCluster (Process 1) - participant MSC2 as MasterSlaveCluster (Process 2) - participant R as Redis - participant W1 as Python Worker 1 - participant W2 as Python Worker 2 - - Note over ME,W2: Master Failover Scenario - - %% Initial state - ME->>+MSC1: emit('master-acquired') - MSC1->>+MSC1: becomeMaster() - ME->>+MSC2: emit('master-lost') - MSC2->>+MSC2: becomeSlave() - - ME->>+R: Automatic slave registration - MSC1->>+W1: WebSocket connection (Master) - MSC1->>+W2: WebSocket connection (Master) - - Note over MSC1: Original master fails - MSC1--xMSC1: Process crash/network failure - - %% MasterElection detects failure and triggers new election - ME->>+ME: Detect failed master, trigger election - ME->>+MSC2: emit('master-acquired') - MSC2->>+MSC2: becomeMaster() - - %% Master recovery process - MSC2->>+MSC2: connectToAllWorkers() - MSC2->>+W1: WebSocket reconnection - MSC2->>+W2: WebSocket reconnection - - MSC2->>+MSC2: healClusterAssignments() - MSC2->>+R: hGetAll(worker:camera_subscriptions) - MSC2->>+MSC2: rebalanceCameraSubscriptions() - - %% Restore subscriptions - MSC2->>+W1: WebSocket: SUBSCRIBE commands - MSC2->>+W2: WebSocket: SUBSCRIBE commands - - Note over MSC2,W2: New master operational - slave registration handled by MasterElection -``` - -## System Architecture Diagrams - -### Master-Slave Cluster Architecture - -```mermaid -graph TB - subgraph "Backend Process Cluster" - M[Master Process
NodeJS Backend] - S1[Slave Process 1
NodeJS Backend] - S2[Slave Process 2
NodeJS Backend] - S3[Slave Process N
NodeJS Backend] - end - - subgraph "Python Worker Cluster" - W1[Python ML Worker 1
WebSocket Server] - W2[Python ML Worker 2
WebSocket Server] - W3[Python ML Worker N
WebSocket Server] - end - - subgraph "Redis Coordination Layer" - R[(Redis)] - R --- C1[worker:slave:* channels] - R --- C2[worker:connections state] - R --- C3[worker:assignments mapping] - R --- C4[worker:camera_subscriptions] - end - - M ===|WebSocket Connections
Only Master| W1 - M ===|WebSocket Connections
Only Master| W2 - M ===|WebSocket Connections
Only Master| W3 - - M <-->|Pub/Sub Messages| R - S1 <-->|Pub/Sub Messages| R - S2 <-->|Pub/Sub Messages| R - S3 <-->|Pub/Sub Messages| R - - M -.->|Route Messages| S1 - M -.->|Route Messages| S2 - M -.->|Route Messages| S3 -``` - -### Data Flow Architecture - -```mermaid -sequenceDiagram - participant CS as CameraService - participant DC as DetectorCluster - participant MS as MasterSlaveCluster - participant R as Redis - participant W as Python Worker - participant S as Slave Process - - Note over CS,S: Camera Subscription Flow - - CS->>DC: subscribeToCamera(cameraId, rtspUrl, modelUrl, ...) - DC->>MS: storeCameraSubscription({...}) - - alt Master Process - MS->>MS: findBestWorkerForSubscription() - MS->>R: hSet(camera_subscriptions, subscriptionId, {...}) - MS->>W: WebSocket: SUBSCRIBE command - W->>MS: STATE_REPORT (subscription confirmed) - MS->>R: publish(worker:slave:{slaveId}, detection_message) - else Slave Process - MS->>R: publish(worker:master:commands, subscribe_command) - Note over MS: Routes to master for execution - end - - Note over CS,S: Detection Processing Flow - - W->>MS: WebSocket: IMAGE_DETECTION response - MS->>MS: findSlaveForWorker(workerUrl) - MS->>R: publish(worker:slave:{slaveId}, detection_data) - R->>S: Redis pub/sub delivery - S->>DC: emit('detection', detectionData) - DC->>CS: callback(detectionData) -``` - -### Subscription Lifecycle Management - -```mermaid -stateDiagram-v2 - [*] --> Pending: Camera Subscription Request - - Pending --> Active: Worker accepts subscription - Pending --> Failed: Worker rejects/unavailable - Pending --> Recovering: Assignment change needed - - Active --> Recovering: Worker goes offline - Active --> [*]: Unsubscribe request - - Recovering --> Active: Reassigned to online worker - Recovering --> Failed: No workers available - Recovering --> [*]: Subscription expired - - Failed --> Recovering: Worker becomes available - Failed --> [*]: Max retries exceeded - - note right of Recovering - Automatic rebalancing every 30s - Master detects offline workers - Reassigns to healthy workers - end note -``` - -### Worker Connection State Machine - -```mermaid -stateDiagram-v2 - [*] --> Connecting: initialize() - - Connecting --> Online: WebSocket connected + STATE_REPORT received - Connecting --> Reconnecting: Connection failed - - Online --> Offline: Heartbeat timeout (10s) - Online --> Reconnecting: WebSocket error/close - Online --> [*]: close() called - - Offline --> Reconnecting: Scheduled reconnect (10s) - Offline --> [*]: close() called - - Reconnecting --> Online: Reconnection successful - Reconnecting --> Reconnecting: Reconnection failed (retry) - Reconnecting --> [*]: close() called - - note right of Online - - Sends heartbeat every 2s - - Processes subscriptions - - Reports resource usage - - Handles detection results - end note -``` - -### Redis Channel Communication Flow - -```mermaid -graph LR - subgraph "Master Process" - M[Master] - WS1[WebSocket to Worker 1] - WS2[WebSocket to Worker 2] - end - - subgraph "Slave Processes" - S1[Slave 1] - S2[Slave 2] - end - - subgraph "Redis Channels" - CH1[worker:slave:slave1] - CH2[worker:slave:slave2] - CH3[worker:messages:upstream] - CH4[worker:assignments:changed] - end - - WS1 -->|Detection Data| M - WS2 -->|Detection Data| M - - M -->|Route by Assignment| CH1 - M -->|Route by Assignment| CH2 - - CH1 -->|Subscribed| S1 - CH2 -->|Subscribed| S2 - - S1 -->|Commands/Responses| CH3 - S2 -->|Commands/Responses| CH3 - CH3 -->|Subscribed| M - - M -->|Assignment Updates| CH4 - CH4 
-->|Subscribed| S1 - CH4 -->|Subscribed| S2 -``` - -### Detailed Message Flow by Channel - -```mermaid -graph TB - subgraph "Python ML Workers" - W1[Worker 1
ws://worker1:8000] - W2[Worker 2
ws://worker2:8000] - W3[Worker N
ws://workerN:8000] - end - - subgraph "Master Process (Only One)" - M[Master Backend Process] - subgraph "Master Managed Data" - WC1[WebSocket Connection Pool] - AS[Assignment State] - SUB[Subscription Manager] - end - end - - subgraph "Redis Channels & Storage" - subgraph "Individual Slave Channels" - SC1["worker:slave:slave-uuid-1"] - SC2["worker:slave:slave-uuid-2"] - SC3["worker:slave:slave-uuid-N"] - end - - subgraph "Master Coordination Channels" - MC["worker:master:commands"] - ACH["worker:assignments:changed"] - UPC["worker:messages:upstream"] - SEC["worker:subscription:events"] - end - - subgraph "Persistent Storage" - WCS["worker:connections
(Worker Health States)"] - WAS["worker:assignments
(Worker→Slave Mapping)"] - WSS["worker:slaves
(Slave Registration)"] - CSS["worker:camera_subscriptions
(Subscription Persistence)"] - end - end - - subgraph "Slave Processes" - S1[Slave Process 1
slave-uuid-1] - S2[Slave Process 2
slave-uuid-2] - S3[Slave Process N
slave-uuid-N] - end - - %% WebSocket Communications (Master Only) - W1 -.->|"WebSocket Messages:
• IMAGE_DETECTION
• STATE_REPORT
• PATCH_SESSION"| WC1 - W2 -.->|"WebSocket Messages:
• IMAGE_DETECTION
• STATE_REPORT
• PATCH_SESSION"| WC1 - W3 -.->|"WebSocket Messages:
• IMAGE_DETECTION
• STATE_REPORT
• PATCH_SESSION"| WC1 - - WC1 -.->|"WebSocket Commands:
• SUBSCRIBE
• UNSUBSCRIBE
• REQUEST_STATE
• SET_SESSION_ID"| W1 - WC1 -.->|"WebSocket Commands:
• SUBSCRIBE
• UNSUBSCRIBE
• REQUEST_STATE
• SET_SESSION_ID"| W2 - WC1 -.->|"WebSocket Commands:
• SUBSCRIBE
• UNSUBSCRIBE
• REQUEST_STATE
• SET_SESSION_ID"| W3 - - %% Master Redis Operations - M -->|"hSet() operations:
• Worker states
• Assignments
• Subscriptions"| WCS - M -->|"hSet() operations:
• Worker→Slave mapping
• Load balancing data"| WAS - M -->|"hSet() operations:
• Subscription details
• Assignment tracking"| CSS - - %% Master to Slave Routing - M -->|"Detection Routing:
{type: 'detection',
workerUrl: string,
data: ImageDetectionResponse,
timestamp: string}"| SC1 - M -->|"Detection Routing:
{type: 'detection',
workerUrl: string,
data: ImageDetectionResponse,
timestamp: string}"| SC2 - M -->|"Detection Routing:
{type: 'detection',
workerUrl: string,
data: ImageDetectionResponse,
timestamp: string}"| SC3 - - M -->|"Assignment Updates:
{type: 'assignments_updated',
assignments: Record,
timestamp: string}"| ACH - - %% Slave to Master Communication - S1 -->|"Slave Commands:
{type: 'subscribe_camera',
subscriptionIdentifier: string,
rtspUrl: string,
modelUrl: string,
modelId: number,
snapshotUrl?: string,
cropX1?: number, ...}"| MC - S2 -->|"Slave Commands:
{type: 'subscribe_camera',
subscriptionIdentifier: string,
rtspUrl: string,
modelUrl: string,
modelId: number,
snapshotUrl?: string,
cropX1?: number, ...}"| MC - S3 -->|"Slave Commands:
{type: 'subscribe_camera',
subscriptionIdentifier: string,
rtspUrl: string,
modelUrl: string,
modelId: number,
snapshotUrl?: string,
cropX1?: number, ...}"| MC - - %% Slave Registration and Heartbeats - S1 -->|"hSet() Slave Registration:
{slaveId: string,
processId: string,
online: boolean,
workload: number,
lastSeen: string,
capabilities: {...}}"| WSS - S2 -->|"hSet() Slave Registration:
{slaveId: string,
processId: string,
online: boolean,
workload: number,
lastSeen: string,
capabilities: {...}}"| WSS - S3 -->|"hSet() Slave Registration:
{slaveId: string,
processId: string,
online: boolean,
workload: number,
lastSeen: string,
capabilities: {...}}"| WSS - - %% Channel Subscriptions - SC1 -->|"Subscribed"| S1 - SC2 -->|"Subscribed"| S2 - SC3 -->|"Subscribed"| S3 - - MC -->|"Subscribed"| M - ACH -->|"Subscribed (All Slaves)"| S1 - ACH -->|"Subscribed (All Slaves)"| S2 - ACH -->|"Subscribed (All Slaves)"| S3 - - style M fill:#ff9999 - style WC1 fill:#ffcc99 - style AS fill:#ffcc99 - style SUB fill:#ffcc99 - style S1 fill:#99ccff - style S2 fill:#99ccff - style S3 fill:#99ccff -``` - -### Channel Message Specification - -| Channel Name | Direction | Message Type | Sender | Receiver | Payload Structure | Purpose | -|--------------|-----------|--------------|---------|-----------|-------------------|---------| -| `worker:slave:{slaveId}` | Master→Slave | `detection` | Master Process | Assigned Slave | `{type: 'detection', workerUrl: string, data: ImageDetectionResponse, timestamp: string}` | Route AI detection results from workers to processing slaves | -| `worker:master:commands` | Slave→Master | `regenerate_subscriptions` | Any Process | Master Process | `{type: 'regenerate_subscriptions', reason: string, triggeredBy: string, timestamp: string}` | Notify master that database changed and subscriptions need regeneration | -| `worker:assignments:changed` | Master→All Slaves | `assignments_updated` | Master Process | All Slave Processes | `{type: 'assignments_updated', assignments: Record, timestamp: string}` | Broadcast worker-to-slave assignment changes for rebalancing | -| `worker:messages:upstream` | Slave→Master | Various | Any Slave Process | Master Process | `{type: string, slaveId: string, data: any, timestamp: string}` | General slave-to-master communication (currently unused) | - -### Redis Hash Storage Specification - -| Redis Key | Data Type | Content | Update Pattern | Cleanup Strategy | -|-----------|-----------|---------|----------------|-------------------| -| `worker:connections` | Hash Map | `{[workerUrl]: JSON.stringify(WorkerConnectionState)}` | Master updates every 2s | Manual cleanup only | -| `worker:assignments` | Hash Map | `{[workerUrl]: slaveId}` | Master updates on rebalancing | Manual cleanup only | -| `worker:camera_subscriptions` | Hash Map | `{[subscriptionId]: JSON.stringify(CameraSubscription)}` | Master on subscription changes | Manual cleanup only | -| `master-election:slaves` | Hash Map | `{[nodeId]: JSON.stringify(SlaveNode)}` | MasterElection service manages | TTL-based cleanup | - -### WebSocket Message Protocol - -| Direction | Message Type | JSON Structure | Trigger | Response Expected | -|-----------|--------------|----------------|---------|-------------------| -| Backend→Worker | `SUBSCRIBE` | `{type: "subscribe", payload: {subscriptionIdentifier, rtspUrl, snapshotUrl?, snapshotInterval?, modelUrl, modelName, modelId, cropX1?, cropY1?, cropX2?, cropY2?}}` | Camera subscription request | STATE_REPORT confirmation | -| Backend→Worker | `UNSUBSCRIBE` | `{type: "unsubscribe", payload: {subscriptionIdentifier}}` | Camera unsubscription | STATE_REPORT confirmation | -| Backend→Worker | `REQUEST_STATE` | `{type: "requestState"}` | Health check or monitoring | STATE_REPORT response | -| Backend→Worker | `SET_SESSION_ID` | `{type: "setSessionId", payload: {displayIdentifier, sessionId}}` | Associate session with display | None | -| Backend→Worker | `PATCH_SESSION_RESULT` | `{type: "patchSessionResult", payload: {sessionId, success, message?}}` | Session update response | None | -| Worker→Backend | `IMAGE_DETECTION` | `{type: "imageDetection", subscriptionIdentifier, timestamp, data: 
{detection: {carModel?, carBrand?, carYear?, bodyType?, licensePlateText?, licensePlateType?}, modelId, modelName}}` | AI detection result | None | -| Worker→Backend | `STATE_REPORT` | `{type: "stateReport", cpuUsage, memoryUsage, gpuUsage?, gpuMemoryUsage?, cameraConnections: [{subscriptionIdentifier, modelId, modelName, online, cropX?, cropY?}]}` | Periodic health report (every 2s) | None | -| Worker→Backend | `PATCH_SESSION` | `{type: "patchSession", sessionId, data: any}` | Session data update from ML processing | PATCH_SESSION_RESULT | - -## Event System Architecture - -### Event Flow Hierarchy - -```mermaid -graph TD - subgraph "Service Layer" - CS[CameraService] - end - - subgraph "Cluster Layer" - DC[DetectorCluster] - DC --> DCE[Detection Events] - DC --> WOE[Worker Online Events] - DC --> WOFE[Worker Offline Events] - end - - subgraph "Worker Management Layer" - MS[MasterSlaveWorkerCluster] - MS --> DE[detection] - MS --> WC[worker:connected] - MS --> WD[worker:disconnected] - MS --> WSE[worker:websocket_error] - MS --> WON[worker:online] - MS --> WOFF[worker:offline] - MS --> WSR[worker:state_report] - end - - subgraph "Connection Layer" - DConn[DetectorConnection] - DConn --> IMG[IMAGE_DETECTION] - DConn --> STATE[STATE_REPORT] - DConn --> PATCH[PATCH_SESSION] - end - - DConn --> MS - MS --> DC - DC --> CS - - IMG -.-> DE - STATE -.-> WSR - WC -.-> WOE - WD -.-> WOFE -``` - -### Message Types and Routing - -#### WebSocket Message Types (Python Worker → Backend) -- `IMAGE_DETECTION`: AI detection results from camera streams -- `STATE_REPORT`: Worker health, resource usage, and subscription status -- `PATCH_SESSION`: Session data updates from worker processing - -#### Redis Channel Message Types -- `detection`: Detection results routed from master to assigned slave -- `command_response`: Command acknowledgment and status updates -- `heartbeat`: Worker and slave health monitoring messages -- `assignments_updated`: Worker-to-slave assignment change notifications - -#### Internal Event Types -- `worker:online`: Worker connection established and ready -- `worker:offline`: Worker connection lost or health check failed -- `worker:connected`: WebSocket connection opened (not necessarily ready) -- `worker:disconnected`: WebSocket connection closed -- `worker:websocket_error`: WebSocket communication errors -- `worker:detection_result`: Processed detection with metadata -- `worker:state_report`: Worker resource and subscription status - -## Subscription Management - -### Camera Subscription Flow - -1. **Registration Phase** - - `CameraService.subscribeToCamera()` → `DetectorCluster.subscribeToCamera()` - - Master process finds optimal worker using load balancing algorithm - - Subscription stored in Redis with full configuration including crop parameters - - Master sends WebSocket SUBSCRIBE command to assigned worker - -2. **Processing Phase** - - Python worker establishes RTSP connection to camera - - Worker performs AI inference on video stream frames - - Detection results sent back via WebSocket with subscription identifier - - Master routes results to appropriate slave based on worker assignments - -3. 
**Rebalancing Phase** - - Master monitors worker health every 30 seconds - - Orphaned subscriptions (offline workers) automatically detected - - Load balancing algorithm reassigns cameras to healthy workers - - Fresh model URLs generated to handle S3 presigned URL expiration - -### Load Balancing Algorithm - -```typescript -// Simplified load balancing logic -function findBestWorkerForSubscription(onlineWorkers, allSubscriptions) { - return onlineWorkers - .sort((a, b) => { - const loadA = getSubscriptionCount(a.url); - const loadB = getSubscriptionCount(b.url); - if (loadA !== loadB) { - return loadA - loadB; // Prefer lower load - } - return (a.cpuUsage || 0) - (b.cpuUsage || 0); // Then prefer lower CPU - })[0]; -} -``` - -### Automatic Failover Process - -1. **Detection**: Master detects worker offline via missed heartbeats (10s timeout) -2. **Identification**: System identifies all camera subscriptions assigned to offline worker -3. **Reassignment**: Load balancer selects optimal replacement worker -4. **Migration**: Subscription updated in Redis with new worker assignment -5. **Resubscription**: Master sends SUBSCRIBE command to new worker with fresh model URL -6. **Verification**: New worker confirms subscription and begins processing - -## Resource Management - -### Connection Pooling -- Master maintains persistent WebSocket connections to all configured workers -- Connection sharing across all backend processes reduces resource overhead -- Automatic reconnection with exponential backoff prevents connection storms - -### Memory Management -- Redis data uses manual cleanup to prevent accidental state loss -- Subscription callbacks stored in local memory with automatic cleanup on unsubscribe -- Worker resource usage tracked in real-time to prevent overload - -### CPU and GPU Monitoring -- Workers report resource usage every 2 seconds via STATE_REPORT messages -- Load balancing algorithm considers CPU usage when assigning new subscriptions -- GPU utilization tracked for ML model optimization and capacity planning - -## Error Handling - -### Connection Error Recovery -- **Exponential Backoff**: 10-second fixed interval reconnection attempts -- **Circuit Breaker**: Automatic failover prevents overwhelming failed workers -- **Graceful Degradation**: System continues operating with available workers - -### Master Election Failover -- **Leadership Transfer**: New master elected via Redis-based coordination -- **State Recovery**: Worker connections and subscriptions restored from Redis persistence -- **Seamless Transition**: No subscription loss during master failover process - -### Monitoring and Observability - -#### Structured Logging Topics -- `detector-cluster`: High-level cluster operations and state changes -- `master-slave-worker-cluster`: Worker assignment and rebalancing operations -- `DetectorConnection`: WebSocket connection events and message processing - -#### Monitoring Information -- Subscription identifier format: `${displayId};${cameraId}` for traceability -- Worker assignment tracking with process ID and timestamp correlation -- Redis pub/sub message routing with structured logging -- Heartbeat and health check timing with millisecond precision - -## Configuration Parameters - -### Timing Configuration -```typescript -const WORKER_TIMEOUT_MS = 10000; // Worker heartbeat timeout -const SLAVE_HEARTBEAT_INTERVAL = 5000; // Slave heartbeat frequency -const SLAVE_TIMEOUT = 15000; // Slave registration timeout -const REBALANCE_INTERVAL = 30000; // Automatic rebalancing 
frequency -const STATE_UPDATE_INTERVAL = 2000; // Worker state update frequency -const RECONNECT_DELAY = 10000; // WebSocket reconnection delay -``` - -### Environment Variables -```bash -DETECTOR_WORKERS=ws://worker1:8000,ws://worker2:8000 # Python worker URLs -REDIS_HOST=localhost # Redis coordination server -REDIS_PORT=6379 # Redis server port -REDIS_PASSWORD=secure_password # Redis authentication -DETECT_DEBUG=true # Enable detailed structured logging -``` - -## Performance Characteristics - -### Scalability Metrics -- **Horizontal Scaling**: Add backend processes without WebSocket connection changes -- **Worker Scaling**: Python ML workers scale independently of backend processes -- **Redis Optimization**: Efficient pub/sub routing with minimal memory overhead - -### Throughput Capabilities -- **Camera Subscriptions**: Support for 100+ simultaneous camera streams per worker -- **Detection Processing**: Sub-second AI inference with real-time result delivery -- **Message Routing**: Sub-millisecond Redis pub/sub message delivery - -### Resource Efficiency -- **Connection Multiplexing**: Single WebSocket per worker shared across all processes -- **Memory Usage**: Lightweight subscription state with callback cleanup -- **Network Optimization**: Binary WebSocket frames with JSON payload compression - -## Public Interface Specification - -The distributed worker cluster exposes a clean, simplified interface to external services like CameraService, hiding the complexity of the underlying master-slave architecture. All interactions go through the `DetectorCluster` class, which serves as the primary facade. - -### Primary Interface: DetectorCluster - -The `DetectorCluster` class in `/services/DetectorCluster.ts` provides the main public interface that external services interact with. It abstracts away the distributed architecture complexity and provides consistent behavior regardless of whether the current process is a master or slave. 
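-
-As an illustration of how an external consumer might use this facade (the HTTP route, the Express wiring, and the `online` field on each worker entry below are assumptions made for the example, not part of the module), cluster health can be surfaced directly from `getState()`:
-
-```typescript
-// Illustrative health endpoint built on the DetectorCluster facade.
-import express from 'express';
-import { detectorCluster } from '~/modules/camera/services/CameraService';
-
-const app = express();
-
-app.get('/api/cluster/health', async (_req, res) => {
-  const state = await detectorCluster.getState();
-  res.json({
-    role: state.isMaster ? 'master' : 'slave',
-    totalWorkers: state.totalWorkers,
-    // Assumes each worker entry exposes an `online` flag as in WorkerConnectionState.
-    workersOnline: state.workers.filter(w => w.online).length,
-    totalSlaves: state.totalSlaves,
-  });
-});
-```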
- -#### Core Interface Methods - -##### Camera Subscription Management - -```typescript -/** - * Subscribe to a camera stream for AI detection processing - * @param subscriptionIdentifier - Unique identifier format: "${displayId};${cameraId}" - * @param rtspUrl - RTSP stream URL for the camera - * @param modelUrl - Pre-signed S3 URL for AI model (1hr TTL) - * @param modelId - Database ID of the AI model - * @param modelName - Human-readable model identifier - * @param callback - Function called when detection results are received - * @param snapshotUrl - Optional HTTP endpoint for camera snapshots - * @param snapshotInterval - Optional snapshot capture interval in milliseconds - * @param cropX1, cropY1, cropX2, cropY2 - Optional image crop coordinates - * @returns Promise - Always returns true (errors thrown as exceptions) - */ -public async subscribeToCamera( - subscriptionIdentifier: string, - rtspUrl: string, - modelUrl: string, - modelId: number, - modelName: string, - callback: Function, - snapshotUrl?: string, - snapshotInterval?: number, - cropX1?: number, - cropY1?: number, - cropX2?: number, - cropY2?: number -): Promise -``` - -**Behavior:** -- **Master Process**: Stores subscription in Redis, assigns to optimal worker, sends WebSocket command -- **Slave Process**: Routes subscription request to master via Redis pub/sub -- **Callback Registration**: Stores callback locally for detection result processing -- **Persistence**: All subscription details stored in Redis for failover recovery -- **Load Balancing**: Automatically selects best available worker based on CPU and subscription load - -```typescript -/** - * Unsubscribe from a specific camera stream - * @param subscriptionIdentifier - The subscription to remove - * @returns Promise - Success status - */ -public async unsubscribeFromCamera(subscriptionIdentifier: string): Promise -``` - -**Behavior:** -- Removes local callback listeners immediately -- Subscription cleanup handled automatically by cluster rebalancing -- Safe to call multiple times (idempotent operation) - -```typescript -/** - * Remove all subscriptions for a specific camera across all displays - * @param cameraIdentifier - The camera ID to unsubscribe from all displays - * @returns Promise - */ -public async unsubscribeFromAllWithCameraID(cameraIdentifier: string): Promise -``` - -**Behavior:** -- Finds all subscription identifiers matching pattern `*;${cameraIdentifier}` -- Removes all local callbacks for matched subscriptions -- Cluster automatically handles worker-side cleanup - -##### Event Registration and Callbacks - -```typescript -/** - * Register a callback for detection results from a specific subscription - * @param subscriptionIdentifier - Target subscription - * @param callback - Function to call with detection data - */ -public addDetectionListener(subscriptionIdentifier: string, callback: Function): void - -/** - * Register a global callback for all detection results - * @param callback - Function to call with any detection data - */ -public addGlobalDetectionListener(callback: Function): void -``` - -**Detection Callback Signature:** -```typescript -type DetectionCallback = (data: { - subscriptionIdentifier: string; - timestamp: Date; - data: { - detection: { - carModel?: string; - carBrand?: string; - carYear?: number; - bodyType?: string; - licensePlateText?: string; - licensePlateType?: string; - }; - modelId: number; - modelName: string; - }; -}) => void; -``` - -##### Cluster State Management - -```typescript -/** - * Get comprehensive 
cluster state for monitoring and status reporting - * @returns Promise - */ -public async getState(): Promise - -/** - * Legacy method - rebalancing now happens automatically - * @returns Promise - Always returns true - */ -public async rebalanceWorkers(): Promise -``` - -**DetectorClusterState Interface:** -```typescript -interface DetectorClusterState { - processId: string; // Current process identifier - isMaster: boolean; // Whether this process is the master - slaveId: string; // This process's slave identifier - totalWorkers: number; // Number of Python ML workers - totalSlaves: number; // Number of backend slave processes - workers: WorkerState[]; // Detailed worker health and status - slaves: SlaveInfo[]; // Slave process information - assignments: Record; // workerUrl -> slaveId mapping -} -``` - -##### Session Management (Future Implementation) - -```typescript -/** - * Associate a session ID with a camera subscription for tracking - * @param subscriptionIdentifier - Target subscription - * @param sessionId - Session ID to associate (null to clear) - * @returns Promise - Success status - */ -public async setSessionId(subscriptionIdentifier: string, sessionId: number | null): Promise - -/** - * Get current camera image via worker REST API - * @param cameraIdentifier - Camera to capture from - * @returns Promise - JPEG image data - */ -public async getCameraImage(cameraIdentifier: string): Promise -``` - -**Note:** These methods are currently not fully implemented in master-slave mode. - -### Event System Interface - -The cluster emits events that external services can listen to for system monitoring and integration: - -#### Emitted Events - -```typescript -// Detection result processed -detectorCluster.on('worker:detection_result', (event: { - url: string; // Worker URL (always 'cluster-managed') - cameraId: string; // Subscription identifier - detections: number; // Number of objects detected (0 or 1) -}) => void); - -// Worker status changes -detectorCluster.on('worker:online', (event: { url: string }) => void); -detectorCluster.on('worker:offline', (event: { url: string }) => void); - -// Connection events -detectorCluster.on('worker:connecting', (event: { url: string }) => void); -detectorCluster.on('worker:disconnected', (event: { url: string, reason: string }) => void); -detectorCluster.on('worker:websocket_error', (event: { url: string, error: string }) => void); -``` - -### Usage Examples - -#### Basic Camera Subscription (CameraService Integration) - -```typescript -import { detectorCluster } from '~/modules/camera/services/CameraService'; - -// Subscribe to camera with AI detection -const success = await detectorCluster.subscribeToCamera( - `display-123;camera-456`, // subscriptionIdentifier - 'rtsp://192.168.1.100:554/stream1', // rtspUrl - 'https://s3.bucket.com/model.onnx', // modelUrl (pre-signed) - 42, // modelId - 'vehicle-detection-v2', // modelName - (detectionData) => { // callback - console.log('Detection:', detectionData.data.detection); - // Process car model, license plate, etc. - }, - 'http://192.168.1.100/snapshot.jpg', // snapshotUrl (optional) - 5000, // snapshotInterval (optional) - 100, 50, 800, 600 // crop coordinates (optional) -); -``` - -#### Event Monitoring Integration - -```typescript -// Monitor worker health -detectorCluster.on('worker:online', (event) => { - console.log(`Worker ${event.url} came online`); - // Update dashboard, send notifications, etc. 
-}); - -detectorCluster.on('worker:offline', (event) => { - console.log(`Worker ${event.url} went offline`); - // Alert administrators, trigger failover procedures -}); - -// Monitor detection activity -detectorCluster.on('worker:detection_result', (event) => { - if (event.detections > 0) { - console.log(`Camera ${event.cameraId} detected objects`); - // Trigger content changes, log analytics, etc. - } -}); -``` - -#### Cluster State Monitoring - -```typescript -// Get comprehensive cluster status -const state = await detectorCluster.getState(); - -console.log(`Process ${state.processId} is ${state.isMaster ? 'MASTER' : 'SLAVE'}`); -console.log(`Cluster: ${state.totalWorkers} workers, ${state.totalSlaves} slaves`); - -// Monitor worker health -state.workers.forEach(worker => { - console.log(`Worker ${worker.url}: ${worker.online ? 'ONLINE' : 'OFFLINE'}`); - console.log(` CPU: ${worker.cpuUsage}%, Memory: ${worker.memoryUsage}%`); - console.log(` Subscriptions: ${worker.subscriptionCount}`); -}); - -// Check assignments -Object.entries(state.assignments).forEach(([workerUrl, slaveId]) => { - console.log(`Worker ${workerUrl} assigned to slave ${slaveId}`); -}); -``` - -#### Bulk Camera Management - -```typescript -// Remove all subscriptions for a camera being deleted -await detectorCluster.unsubscribeFromAllWithCameraID('camera-456'); - -// Re-subscribe camera to all displays after configuration change -const displays = await getDisplaysForCamera('camera-456'); -for (const display of displays) { - await detectorCluster.subscribeToCamera( - `${display.id};camera-456`, - camera.rtspUrl, - freshModelUrl, - modelId, - modelName, - createDetectionHandler(display.id, camera.id), - camera.snapshotUrl, - camera.snapshotInterval, - display.cropX1, display.cropY1, - display.cropX2, display.cropY2 - ); -} -``` - -### Error Handling Interface - -The cluster interface follows consistent error handling patterns: - -#### Exception Types - -```typescript -// Subscription errors -try { - await detectorCluster.subscribeToCamera(...); -} catch (error) { - // Possible errors: - // - "No workers available for assignment" - // - "Invalid subscription identifier format" - // - "Model URL expired or inaccessible" - // - Redis connection errors -} - -// State retrieval errors -try { - const state = await detectorCluster.getState(); -} catch (error) { - // Returns safe default state on errors - // Logs detailed error information -} -``` - -#### Graceful Degradation - -- **No Workers Available**: Subscriptions stored in Redis, will activate when workers come online -- **Master Process Failure**: New master elected, all subscriptions restored from Redis -- **Redis Connection Issues**: Local callbacks continue working, subscriptions restored when connection recovers -- **Invalid Parameters**: Clear error messages with parameter validation - -### Integration Patterns - -#### Service Layer Integration - -```typescript -// CameraService.ts example -export class CameraService { - constructor() { - // Initialize cluster connection - detectorCluster.initialize(); - - // Set up global detection processing - detectorCluster.addGlobalDetectionListener(this.processDetection.bind(this)); - } - - async subscribeCamera(displayId: string, camera: CameraEntity) { - const subscriptionId = `${displayId};${camera.cameraIdentifier}`; - - return await detectorCluster.subscribeToCamera( - subscriptionId, - camera.rtspUrl, - await this.getModelUrl(camera.modelId), - camera.modelId, - camera.modelName, - (data) => 
this.handleDetection(displayId, camera.id, data), - camera.snapshotUrl, - camera.snapshotInterval, - camera.cropX1, camera.cropY1, - camera.cropX2, camera.cropY2 - ); - } - - private processDetection(data: ImageDetectionResponse) { - // Global detection processing logic - this.updateAnalytics(data); - this.triggerDecisionTrees(data); - } -} -``` - -### Interface Guarantees and Contracts - -#### Reliability Guarantees - -- **At-Least-Once Detection Delivery**: Detection callbacks will be called at least once per detection -- **Subscription Persistence**: Subscriptions survive process restarts and master failovers -- **Automatic Reconnection**: Workers automatically reconnect with exponential backoff -- **Load Balancing**: New subscriptions automatically assigned to least loaded workers - -#### Performance Characteristics - -- **Subscription Latency**: < 100ms for new camera subscriptions -- **Detection Latency**: < 50ms from worker to callback (excluding AI processing time) -- **State Query Performance**: < 10ms for cluster state retrieval -- **Memory Usage**: O(n) where n = number of active subscriptions - -#### Thread Safety - -- **Callback Execution**: All callbacks executed on main event loop (Node.js single-threaded) -- **Concurrent Subscriptions**: Multiple simultaneous subscriptions handled safely -- **State Consistency**: Redis operations use atomic transactions where needed - -This interface specification provides external services with a clear understanding of how to integrate with the distributed worker cluster while maintaining abstraction from the underlying complexity. - -## Architecture Evolution: From Complex to Pure Declarative - -### Previous Architecture Limitations (Addressed) -- **Complex State Synchronization**: Incremental updates between database, Redis desired state, and worker actual state created synchronization complexity -- **Command Protocol Complexity**: Multiple command types (`subscribe_camera`, `unsubscribe_camera`) with complex payloads and error handling -- **State Divergence**: Database and Redis desired state could diverge, causing inconsistent behavior -- **Partial Update Complexity**: Complex logic for handling individual subscription changes led to edge cases and race conditions -- **Service Layer Complexity**: Camera/Display services contained complex subscription management logic - -### Current Pure Declarative Architecture Benefits -- **Single Source of Truth**: Database is the only source for desired state - no secondary state stores to synchronize -- **Zero State Divergence**: Desired state is always freshly derived from database queries, eliminating synchronization complexity -- **Simplified Protocol**: Only one command type (`regenerate_subscriptions`) with minimal payload -- **Consistent State Management**: Complete regeneration eliminates all edge cases and partial update complexity -- **Service Layer Simplicity**: Services just update database + trigger regeneration - no subscription logic -- **Operational Resilience**: System is self-healing and predictable - any database change triggers complete reconciliation - -### VMware DRS-like Benefits -- **Global Optimization**: Every regeneration considers all subscriptions globally for optimal load balancing -- **Automatic Recovery**: System automatically heals from any inconsistent state by regenerating from database -- **Resource Efficiency**: Workers assigned based on real-time CPU/memory metrics with load balancing -- **Fault Tolerance**: Complete state recovery from database after any 
failure (process crashes, network interruptions, etc.) - -### Performance Characteristics -- **Regeneration Speed**: Database queries are fast (~10ms) even with hundreds of displays -- **Reconciliation Efficiency**: Only changed subscriptions are actually modified on workers -- **Memory Efficiency**: No persistent state storage outside of database and current worker assignments -- **Network Efficiency**: Minimal command protocol reduces Redis pub/sub overhead - -This pure declarative architecture provides the reliability and simplicity of Kubernetes-style declarative resource management while maintaining the performance and scalability needed for real-time camera processing systems. \ No newline at end of file diff --git a/feeder/note.txt b/feeder/note.txt new file mode 100644 index 0000000..d3b0ef0 --- /dev/null +++ b/feeder/note.txt @@ -0,0 +1 @@ +python simple_track.py --source video/sample.mp4 --show-vid --save-vid --enable-json-log \ No newline at end of file diff --git a/feeder/sender/__init__.py b/feeder/sender/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/feeder/sender/base.py b/feeder/sender/base.py new file mode 100644 index 0000000..8824dfe --- /dev/null +++ b/feeder/sender/base.py @@ -0,0 +1,21 @@ + +import numpy as np +import json + +class NumpyArrayEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, np.ndarray): + return obj.tolist() + else: + return super(NumpyArrayEncoder, self).default(obj) + +class BasSender: + def __init__(self) -> None: + pass + + def send(self, messages): + raise NotImplementedError() \ No newline at end of file diff --git a/feeder/sender/jsonlogger.py b/feeder/sender/jsonlogger.py new file mode 100644 index 0000000..63200cf --- /dev/null +++ b/feeder/sender/jsonlogger.py @@ -0,0 +1,13 @@ +from .base import BasSender +from loguru import logger +import json +from .base import NumpyArrayEncoder + +class JsonLogger(BasSender): + def __init__(self, log_filename:str = "tracking.log") -> None: + super().__init__() + self.logger = logger + self.logger.add(log_filename, format="{message}", level="INFO") + + def send(self, messages): + self.logger.info(json.dumps(messages, cls=NumpyArrayEncoder)) \ No newline at end of file diff --git a/feeder/sender/szmq.py b/feeder/sender/szmq.py new file mode 100644 index 0000000..059c81a --- /dev/null +++ b/feeder/sender/szmq.py @@ -0,0 +1,14 @@ +from .base import BasSender, NumpyArrayEncoder +import zmq +import json + + +class ZmqLogger(BasSender): + def __init__(self, ip_addr:str = "localhost", port:int = 5555) -> None: + super().__init__() + self.context = zmq.Context() + self.producer = self.context.socket(zmq.PUB) + self.producer.connect(f"tcp://{ip_addr}:{port}") + + def send(self, messages): + self.producer.send_string(json.dumps(messages, cls = NumpyArrayEncoder)) \ No newline at end of file diff --git a/feeder/simple_track.py b/feeder/simple_track.py new file mode 100644 index 0000000..a8bf61c --- /dev/null +++ b/feeder/simple_track.py @@ -0,0 +1,245 @@ +import argparse +import cv2 +import os +os.environ["OMP_NUM_THREADS"] = "1" +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" +os.environ["VECLIB_MAXIMUM_THREADS"] = "1" +os.environ["NUMEXPR_NUM_THREADS"] = "1" + +import sys +import numpy as np +from pathlib import Path +import torch + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] +WEIGHTS = ROOT / 'weights' + +if 
str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) +if str(ROOT / 'trackers' / 'strongsort') not in sys.path: + sys.path.append(str(ROOT / 'trackers' / 'strongsort')) + +from ultralytics.nn.autobackend import AutoBackend +from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages +from ultralytics.yolo.data.utils import VID_FORMATS +from ultralytics.yolo.utils import LOGGER, colorstr +from ultralytics.yolo.utils.checks import check_file, check_imgsz +from ultralytics.yolo.utils.files import increment_path +from ultralytics.yolo.utils.torch_utils import select_device +from ultralytics.yolo.utils.ops import Profile, non_max_suppression, scale_boxes +from ultralytics.yolo.utils.plotting import Annotator, colors + +from trackers.multi_tracker_zoo import create_tracker +from sender.jsonlogger import JsonLogger +from sender.szmq import ZmqLogger + +@torch.no_grad() +def run( + source='0', + yolo_weights=WEIGHTS / 'yolov8n.pt', + reid_weights=WEIGHTS / 'osnet_x0_25_msmt17.pt', + imgsz=(640, 640), + conf_thres=0.7, + iou_thres=0.45, + max_det=1000, + device='', + show_vid=True, + save_vid=True, + project=ROOT / 'runs' / 'track', + name='exp', + exist_ok=False, + line_thickness=2, + hide_labels=False, + hide_conf=False, + half=False, + vid_stride=1, + enable_json_log=False, + enable_zmq=False, + zmq_ip='localhost', + zmq_port=5555, +): + source = str(source) + is_file = Path(source).suffix[1:] in (VID_FORMATS) + + if is_file: + source = check_file(source) + + device = select_device(device) + + model = AutoBackend(yolo_weights, device=device, dnn=False, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_imgsz(imgsz, stride=stride) + + dataset = LoadImages( + source, + imgsz=imgsz, + stride=stride, + auto=pt, + transforms=getattr(model.model, 'transforms', None), + vid_stride=vid_stride + ) + bs = len(dataset) + + tracking_config = ROOT / 'trackers' / 'strongsort' / 'configs' / 'strongsort.yaml' + tracker = create_tracker('strongsort', tracking_config, reid_weights, device, half) + + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) + (save_dir / 'tracks').mkdir(parents=True, exist_ok=True) + + # Initialize loggers + json_logger = JsonLogger(f"{source}-strongsort.log") if enable_json_log else None + zmq_logger = ZmqLogger(zmq_ip, zmq_port) if enable_zmq else None + + vid_path, vid_writer = [None] * bs, [None] * bs + dt = (Profile(), Profile(), Profile()) + + for frame_idx, (path, im, im0s, vid_cap, s) in enumerate(dataset): + + with dt[0]: + im = torch.from_numpy(im).to(model.device) + im = im.half() if model.fp16 else im.float() + im /= 255.0 + if len(im.shape) == 3: + im = im[None] + + with dt[1]: + pred = model(im, augment=False, visualize=False) + + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, None, False, max_det=max_det) + + for i, det in enumerate(pred): + seen = 0 + p, im0, _ = path, im0s.copy(), dataset.count + p = Path(p) + + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + + if len(det): + # Filter detections for 'car' class only (class 2 in COCO dataset) + car_mask = det[:, 5] == 2 # car class index is 2 + det = det[car_mask] + + if len(det): + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() + + for *xyxy, conf, cls in reversed(det): + c = int(cls) + id = f'{c}' + label = None if hide_labels else (f'{id} {names[c]}' if hide_conf else f'{id} {names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + + t_outputs = 
tracker.update(det.cpu(), im0) + + if len(t_outputs) > 0: + for j, (output) in enumerate(t_outputs): + bbox = output[0:4] + id = output[4] + cls = output[5] + conf = output[6] + + # Log tracking data + if json_logger or zmq_logger: + track_data = { + 'bbox': bbox.tolist() if hasattr(bbox, 'tolist') else list(bbox), + 'id': int(id), + 'cls': int(cls), + 'conf': float(conf), + 'frame_idx': frame_idx, + 'source': source, + 'class_name': names[int(cls)] + } + + if json_logger: + json_logger.send(track_data) + if zmq_logger: + zmq_logger.send(track_data) + + if save_vid or show_vid: + c = int(cls) + id = int(id) + label = f'{id} {names[c]}' if not hide_labels else f'{id}' + if not hide_conf: + label += f' {conf:.2f}' + annotator.box_label(bbox, label, color=colors(c, True)) + + im0 = annotator.result() + + if show_vid: + cv2.imshow(str(p), im0) + if cv2.waitKey(1) == ord('q'): + break + + if save_vid: + if vid_path[i] != str(save_dir / p.name): + vid_path[i] = str(save_dir / p.name) + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() + + if vid_cap: + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: + fps, w, h = 30, im0.shape[1], im0.shape[0] + + vid_writer[i] = cv2.VideoWriter(vid_path[i], cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + + vid_writer[i].write(im0) + + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + for i, vid_writer_obj in enumerate(vid_writer): + if isinstance(vid_writer_obj, cv2.VideoWriter): + vid_writer_obj.release() + + cv2.destroyAllWindows() + + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--source', type=str, default='0', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--yolo-weights', nargs='+', type=str, default=WEIGHTS / 'yolov8n.pt', help='model path') + parser.add_argument('--reid-weights', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.7, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--show-vid', action='store_true', help='display results') + parser.add_argument('--save-vid', action='store_true', help='save video tracking results') + parser.add_argument('--project', default=ROOT / 'runs' / 'track', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument('--enable-json-log', action='store_true', help='enable JSON file logging') + parser.add_argument('--enable-zmq', action='store_true', help='enable ZMQ messaging') + parser.add_argument('--zmq-ip', type=str, default='localhost', help='ZMQ server IP') + parser.add_argument('--zmq-port', type=int, default=5555, help='ZMQ server port') + + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 + return opt + +def main(opt): + run(**vars(opt)) + +if __name__ == "__main__": + opt = parse_opt() + main(opt) \ No newline at end of file diff --git a/feeder/trackers/__init__.py b/feeder/trackers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/feeder/trackers/botsort/basetrack.py b/feeder/trackers/botsort/basetrack.py new file mode 100644 index 0000000..c8d4c15 --- /dev/null +++ b/feeder/trackers/botsort/basetrack.py @@ -0,0 +1,60 @@ +import numpy as np +from collections import OrderedDict + + +class TrackState(object): + New = 0 + Tracked = 1 + Lost = 2 + LongLost = 3 + Removed = 4 + + +class BaseTrack(object): + _count = 0 + + track_id = 0 + is_activated = False + state = TrackState.New + + history = OrderedDict() + features = [] + curr_feature = None + score = 0 + start_frame = 0 + frame_id = 0 + time_since_update = 0 + + # multi-camera + location = (np.inf, np.inf) + + @property + def end_frame(self): + return self.frame_id + + @staticmethod + def next_id(): + BaseTrack._count += 1 + return BaseTrack._count + + def activate(self, *args): + raise NotImplementedError + + def predict(self): + raise NotImplementedError + + def update(self, *args, **kwargs): + raise NotImplementedError + + def mark_lost(self): + self.state = TrackState.Lost + + def mark_long_lost(self): + self.state = TrackState.LongLost + + def mark_removed(self): + self.state = TrackState.Removed + + @staticmethod + def clear_count(): + BaseTrack._count = 0 diff --git a/feeder/trackers/botsort/bot_sort.py b/feeder/trackers/botsort/bot_sort.py new file mode 100644 index 0000000..1144c17 --- /dev/null +++ b/feeder/trackers/botsort/bot_sort.py @@ -0,0 +1,534 @@ +import cv2 +import matplotlib.pyplot as plt +import numpy as np +from collections import deque + +from trackers.botsort import matching +from trackers.botsort.gmc import GMC +from trackers.botsort.basetrack import BaseTrack, TrackState +from trackers.botsort.kalman_filter import KalmanFilter + +# from fast_reid.fast_reid_interfece import FastReIDInterface + +from reid_multibackend import ReIDDetectMultiBackend +from ultralytics.yolo.utils.ops import xyxy2xywh, 
xywh2xyxy + + +class STrack(BaseTrack): + shared_kalman = KalmanFilter() + + def __init__(self, tlwh, score, cls, feat=None, feat_history=50): + + # wait activate + self._tlwh = np.asarray(tlwh, dtype=np.float32) + self.kalman_filter = None + self.mean, self.covariance = None, None + self.is_activated = False + + self.cls = -1 + self.cls_hist = [] # (cls id, freq) + self.update_cls(cls, score) + + self.score = score + self.tracklet_len = 0 + + self.smooth_feat = None + self.curr_feat = None + if feat is not None: + self.update_features(feat) + self.features = deque([], maxlen=feat_history) + self.alpha = 0.9 + + def update_features(self, feat): + feat /= np.linalg.norm(feat) + self.curr_feat = feat + if self.smooth_feat is None: + self.smooth_feat = feat + else: + self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat + self.features.append(feat) + self.smooth_feat /= np.linalg.norm(self.smooth_feat) + + def update_cls(self, cls, score): + if len(self.cls_hist) > 0: + max_freq = 0 + found = False + for c in self.cls_hist: + if cls == c[0]: + c[1] += score + found = True + + if c[1] > max_freq: + max_freq = c[1] + self.cls = c[0] + if not found: + self.cls_hist.append([cls, score]) + self.cls = cls + else: + self.cls_hist.append([cls, score]) + self.cls = cls + + def predict(self): + mean_state = self.mean.copy() + if self.state != TrackState.Tracked: + mean_state[6] = 0 + mean_state[7] = 0 + + self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) + + @staticmethod + def multi_predict(stracks): + if len(stracks) > 0: + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + for i, st in enumerate(stracks): + if st.state != TrackState.Tracked: + multi_mean[i][6] = 0 + multi_mean[i][7] = 0 + multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance) + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + stracks[i].mean = mean + stracks[i].covariance = cov + + @staticmethod + def multi_gmc(stracks, H=np.eye(2, 3)): + if len(stracks) > 0: + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + + R = H[:2, :2] + R8x8 = np.kron(np.eye(4, dtype=float), R) + t = H[:2, 2] + + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + mean = R8x8.dot(mean) + mean[:2] += t + cov = R8x8.dot(cov).dot(R8x8.transpose()) + + stracks[i].mean = mean + stracks[i].covariance = cov + + def activate(self, kalman_filter, frame_id): + """Start a new tracklet""" + self.kalman_filter = kalman_filter + self.track_id = self.next_id() + + self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xywh(self._tlwh)) + + self.tracklet_len = 0 + self.state = TrackState.Tracked + if frame_id == 1: + self.is_activated = True + self.frame_id = frame_id + self.start_frame = frame_id + + def re_activate(self, new_track, frame_id, new_id=False): + + self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xywh(new_track.tlwh)) + if new_track.curr_feat is not None: + self.update_features(new_track.curr_feat) + self.tracklet_len = 0 + self.state = TrackState.Tracked + self.is_activated = True + self.frame_id = frame_id + if new_id: + self.track_id = self.next_id() + self.score = new_track.score + + self.update_cls(new_track.cls, new_track.score) + + def update(self, new_track, frame_id): + """ + Update a matched track + 
:type new_track: STrack + :type frame_id: int + :type update_feature: bool + :return: + """ + self.frame_id = frame_id + self.tracklet_len += 1 + + new_tlwh = new_track.tlwh + + self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xywh(new_tlwh)) + + if new_track.curr_feat is not None: + self.update_features(new_track.curr_feat) + + self.state = TrackState.Tracked + self.is_activated = True + + self.score = new_track.score + self.update_cls(new_track.cls, new_track.score) + + @property + def tlwh(self): + """Get current position in bounding box format `(top left x, top left y, + width, height)`. + """ + if self.mean is None: + return self._tlwh.copy() + ret = self.mean[:4].copy() + ret[:2] -= ret[2:] / 2 + return ret + + @property + def tlbr(self): + """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., + `(top left, bottom right)`. + """ + ret = self.tlwh.copy() + ret[2:] += ret[:2] + return ret + + @property + def xywh(self): + """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., + `(top left, bottom right)`. + """ + ret = self.tlwh.copy() + ret[:2] += ret[2:] / 2.0 + return ret + + @staticmethod + def tlwh_to_xyah(tlwh): + """Convert bounding box to format `(center x, center y, aspect ratio, + height)`, where the aspect ratio is `width / height`. + """ + ret = np.asarray(tlwh).copy() + ret[:2] += ret[2:] / 2 + ret[2] /= ret[3] + return ret + + @staticmethod + def tlwh_to_xywh(tlwh): + """Convert bounding box to format `(center x, center y, width, + height)`. + """ + ret = np.asarray(tlwh).copy() + ret[:2] += ret[2:] / 2 + return ret + + def to_xywh(self): + return self.tlwh_to_xywh(self.tlwh) + + @staticmethod + def tlbr_to_tlwh(tlbr): + ret = np.asarray(tlbr).copy() + ret[2:] -= ret[:2] + return ret + + @staticmethod + def tlwh_to_tlbr(tlwh): + ret = np.asarray(tlwh).copy() + ret[2:] += ret[:2] + return ret + + def __repr__(self): + return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame) + + +class BoTSORT(object): + def __init__(self, + model_weights, + device, + fp16, + track_high_thresh:float = 0.45, + new_track_thresh:float = 0.6, + track_buffer:int = 30, + match_thresh:float = 0.8, + proximity_thresh:float = 0.5, + appearance_thresh:float = 0.25, + cmc_method:str = 'sparseOptFlow', + frame_rate=30, + lambda_=0.985 + ): + + self.tracked_stracks = [] # type: list[STrack] + self.lost_stracks = [] # type: list[STrack] + self.removed_stracks = [] # type: list[STrack] + BaseTrack.clear_count() + + self.frame_id = 0 + + self.lambda_ = lambda_ + self.track_high_thresh = track_high_thresh + self.new_track_thresh = new_track_thresh + + self.buffer_size = int(frame_rate / 30.0 * track_buffer) + self.max_time_lost = self.buffer_size + self.kalman_filter = KalmanFilter() + + # ReID module + self.proximity_thresh = proximity_thresh + self.appearance_thresh = appearance_thresh + self.match_thresh = match_thresh + + self.model = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16) + + self.gmc = GMC(method=cmc_method, verbose=[None,False]) + + def update(self, output_results, img): + self.frame_id += 1 + activated_starcks = [] + refind_stracks = [] + lost_stracks = [] + removed_stracks = [] + + xyxys = output_results[:, 0:4] + xywh = xyxy2xywh(xyxys.numpy()) + confs = output_results[:, 4] + clss = output_results[:, 5] + + classes = clss.numpy() + xyxys = xyxys.numpy() + confs = confs.numpy() + + remain_inds = confs > self.track_high_thresh + inds_low = confs > 0.1 + inds_high 
= confs < self.track_high_thresh + + inds_second = np.logical_and(inds_low, inds_high) + + dets_second = xywh[inds_second] + dets = xywh[remain_inds] + + scores_keep = confs[remain_inds] + scores_second = confs[inds_second] + + classes_keep = classes[remain_inds] + clss_second = classes[inds_second] + + self.height, self.width = img.shape[:2] + + '''Extract embeddings ''' + features_keep = self._get_features(dets, img) + + if len(dets) > 0: + '''Detections''' + + detections = [STrack(xyxy, s, c, f.cpu().numpy()) for + (xyxy, s, c, f) in zip(dets, scores_keep, classes_keep, features_keep)] + else: + detections = [] + + ''' Add newly detected tracklets to tracked_stracks''' + unconfirmed = [] + tracked_stracks = [] # type: list[STrack] + for track in self.tracked_stracks: + if not track.is_activated: + unconfirmed.append(track) + else: + tracked_stracks.append(track) + + ''' Step 2: First association, with high score detection boxes''' + strack_pool = joint_stracks(tracked_stracks, self.lost_stracks) + + # Predict the current location with KF + STrack.multi_predict(strack_pool) + + # Fix camera motion + warp = self.gmc.apply(img, dets) + STrack.multi_gmc(strack_pool, warp) + STrack.multi_gmc(unconfirmed, warp) + + # Associate with high score detection boxes + raw_emb_dists = matching.embedding_distance(strack_pool, detections) + dists = matching.fuse_motion(self.kalman_filter, raw_emb_dists, strack_pool, detections, only_position=False, lambda_=self.lambda_) + + # ious_dists = matching.iou_distance(strack_pool, detections) + # ious_dists_mask = (ious_dists > self.proximity_thresh) + + # ious_dists = matching.fuse_score(ious_dists, detections) + + # emb_dists = matching.embedding_distance(strack_pool, detections) / 2.0 + # raw_emb_dists = emb_dists.copy() + # emb_dists[emb_dists > self.appearance_thresh] = 1.0 + # emb_dists[ious_dists_mask] = 1.0 + # dists = np.minimum(ious_dists, emb_dists) + + # Popular ReID method (JDE / FairMOT) + # raw_emb_dists = matching.embedding_distance(strack_pool, detections) + # dists = matching.fuse_motion(self.kalman_filter, raw_emb_dists, strack_pool, detections) + # emb_dists = dists + + # IoU making ReID + # dists = matching.embedding_distance(strack_pool, detections) + # dists[ious_dists_mask] = 1.0 + + matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.match_thresh) + + for itracked, idet in matches: + track = strack_pool[itracked] + det = detections[idet] + if track.state == TrackState.Tracked: + track.update(detections[idet], self.frame_id) + activated_starcks.append(track) + else: + track.re_activate(det, self.frame_id, new_id=False) + refind_stracks.append(track) + + ''' Step 3: Second association, with low score detection boxes''' + # if len(scores): + # inds_high = scores < self.track_high_thresh + # inds_low = scores > self.track_low_thresh + # inds_second = np.logical_and(inds_low, inds_high) + # dets_second = bboxes[inds_second] + # scores_second = scores[inds_second] + # classes_second = classes[inds_second] + # else: + # dets_second = [] + # scores_second = [] + # classes_second = [] + + # association the untrack to the low score detections + if len(dets_second) > 0: + '''Detections''' + detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s, c) for + (tlbr, s, c) in zip(dets_second, scores_second, clss_second)] + else: + detections_second = [] + + r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] + dists = matching.iou_distance(r_tracked_stracks, detections_second) + 
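+                # Assign the remaining tracked tracks to the low-score detections on IoU alone, with a looser 0.5 gate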
matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5) + for itracked, idet in matches: + track = r_tracked_stracks[itracked] + det = detections_second[idet] + if track.state == TrackState.Tracked: + track.update(det, self.frame_id) + activated_starcks.append(track) + else: + track.re_activate(det, self.frame_id, new_id=False) + refind_stracks.append(track) + + for it in u_track: + track = r_tracked_stracks[it] + if not track.state == TrackState.Lost: + track.mark_lost() + lost_stracks.append(track) + + '''Deal with unconfirmed tracks, usually tracks with only one beginning frame''' + detections = [detections[i] for i in u_detection] + ious_dists = matching.iou_distance(unconfirmed, detections) + ious_dists_mask = (ious_dists > self.proximity_thresh) + + ious_dists = matching.fuse_score(ious_dists, detections) + + emb_dists = matching.embedding_distance(unconfirmed, detections) / 2.0 + raw_emb_dists = emb_dists.copy() + emb_dists[emb_dists > self.appearance_thresh] = 1.0 + emb_dists[ious_dists_mask] = 1.0 + dists = np.minimum(ious_dists, emb_dists) + + matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7) + for itracked, idet in matches: + unconfirmed[itracked].update(detections[idet], self.frame_id) + activated_starcks.append(unconfirmed[itracked]) + for it in u_unconfirmed: + track = unconfirmed[it] + track.mark_removed() + removed_stracks.append(track) + + """ Step 4: Init new stracks""" + for inew in u_detection: + track = detections[inew] + if track.score < self.new_track_thresh: + continue + + track.activate(self.kalman_filter, self.frame_id) + activated_starcks.append(track) + + """ Step 5: Update state""" + for track in self.lost_stracks: + if self.frame_id - track.end_frame > self.max_time_lost: + track.mark_removed() + removed_stracks.append(track) + + """ Merge """ + self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked] + self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks) + self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks) + self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks) + self.lost_stracks.extend(lost_stracks) + self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks) + self.removed_stracks.extend(removed_stracks) + self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks) + + # output_stracks = [track for track in self.tracked_stracks if track.is_activated] + output_stracks = [track for track in self.tracked_stracks if track.is_activated] + outputs = [] + for t in output_stracks: + output= [] + tlwh = t.tlwh + tid = t.track_id + tlwh = np.expand_dims(tlwh, axis=0) + xyxy = xywh2xyxy(tlwh) + xyxy = np.squeeze(xyxy, axis=0) + output.extend(xyxy) + output.append(tid) + output.append(t.cls) + output.append(t.score) + outputs.append(output) + + return outputs + + def _xywh_to_xyxy(self, bbox_xywh): + x, y, w, h = bbox_xywh + x1 = max(int(x - w / 2), 0) + x2 = min(int(x + w / 2), self.width - 1) + y1 = max(int(y - h / 2), 0) + y2 = min(int(y + h / 2), self.height - 1) + return x1, y1, x2, y2 + + def _get_features(self, bbox_xywh, ori_img): + im_crops = [] + for box in bbox_xywh: + x1, y1, x2, y2 = self._xywh_to_xyxy(box) + im = ori_img[y1:y2, x1:x2] + im_crops.append(im) + if im_crops: + features = self.model(im_crops) + else: + features = np.array([]) + return features + +def joint_stracks(tlista, tlistb): + exists = {} + res = [] + for t in 
tlista: + exists[t.track_id] = 1 + res.append(t) + for t in tlistb: + tid = t.track_id + if not exists.get(tid, 0): + exists[tid] = 1 + res.append(t) + return res + + +def sub_stracks(tlista, tlistb): + stracks = {} + for t in tlista: + stracks[t.track_id] = t + for t in tlistb: + tid = t.track_id + if stracks.get(tid, 0): + del stracks[tid] + return list(stracks.values()) + + +def remove_duplicate_stracks(stracksa, stracksb): + pdist = matching.iou_distance(stracksa, stracksb) + pairs = np.where(pdist < 0.15) + dupa, dupb = list(), list() + for p, q in zip(*pairs): + timep = stracksa[p].frame_id - stracksa[p].start_frame + timeq = stracksb[q].frame_id - stracksb[q].start_frame + if timep > timeq: + dupb.append(q) + else: + dupa.append(p) + resa = [t for i, t in enumerate(stracksa) if not i in dupa] + resb = [t for i, t in enumerate(stracksb) if not i in dupb] + return resa, resb diff --git a/feeder/trackers/botsort/configs/botsort.yaml b/feeder/trackers/botsort/configs/botsort.yaml new file mode 100644 index 0000000..e5afb91 --- /dev/null +++ b/feeder/trackers/botsort/configs/botsort.yaml @@ -0,0 +1,13 @@ +# Trial number: 232 +# HOTA, MOTA, IDF1: [45.31] +botsort: + appearance_thresh: 0.4818211117541298 + cmc_method: sparseOptFlow + conf_thres: 0.3501265956918775 + frame_rate: 30 + lambda_: 0.9896143462366406 + match_thresh: 0.22734550911325851 + new_track_thresh: 0.21144301345190655 + proximity_thresh: 0.5945380911899254 + track_buffer: 60 + track_high_thresh: 0.33824964456239337 diff --git a/feeder/trackers/botsort/gmc.py b/feeder/trackers/botsort/gmc.py new file mode 100644 index 0000000..e7ec207 --- /dev/null +++ b/feeder/trackers/botsort/gmc.py @@ -0,0 +1,316 @@ +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import copy +import time + + +class GMC: + def __init__(self, method='sparseOptFlow', downscale=2, verbose=None): + super(GMC, self).__init__() + + self.method = method + self.downscale = max(1, int(downscale)) + + if self.method == 'orb': + self.detector = cv2.FastFeatureDetector_create(20) + self.extractor = cv2.ORB_create() + self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING) + + elif self.method == 'sift': + self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20) + self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20) + self.matcher = cv2.BFMatcher(cv2.NORM_L2) + + elif self.method == 'ecc': + number_of_iterations = 5000 + termination_eps = 1e-6 + self.warp_mode = cv2.MOTION_EUCLIDEAN + self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) + + elif self.method == 'sparseOptFlow': + self.feature_params = dict(maxCorners=1000, qualityLevel=0.01, minDistance=1, blockSize=3, + useHarrisDetector=False, k=0.04) + # self.gmc_file = open('GMC_results.txt', 'w') + + elif self.method == 'file' or self.method == 'files': + seqName = verbose[0] + ablation = verbose[1] + if ablation: + filePath = r'tracker/GMC_files/MOT17_ablation' + else: + filePath = r'tracker/GMC_files/MOTChallenge' + + if '-FRCNN' in seqName: + seqName = seqName[:-6] + elif '-DPM' in seqName: + seqName = seqName[:-4] + elif '-SDP' in seqName: + seqName = seqName[:-4] + + self.gmcFile = open(filePath + "/GMC-" + seqName + ".txt", 'r') + + if self.gmcFile is None: + raise ValueError("Error: Unable to open GMC file in directory:" + filePath) + elif self.method == 'none' or self.method == 'None': + self.method = 'none' + else: + raise ValueError("Error: Unknown CMC method:" + method) + 
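+        # Inter-frame state shared by all CMC methods: previous grayscale frame plus its keypoints/descriptors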
+ self.prevFrame = None + self.prevKeyPoints = None + self.prevDescriptors = None + + self.initializedFirstFrame = False + + def apply(self, raw_frame, detections=None): + if self.method == 'orb' or self.method == 'sift': + return self.applyFeaures(raw_frame, detections) + elif self.method == 'ecc': + return self.applyEcc(raw_frame, detections) + elif self.method == 'sparseOptFlow': + return self.applySparseOptFlow(raw_frame, detections) + elif self.method == 'file': + return self.applyFile(raw_frame, detections) + elif self.method == 'none': + return np.eye(2, 3) + else: + return np.eye(2, 3) + + def applyEcc(self, raw_frame, detections=None): + + # Initialize + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3, dtype=np.float32) + + # Downscale image (TODO: consider using pyramids) + if self.downscale > 1.0: + frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + width = width // self.downscale + height = height // self.downscale + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + + # Initialization done + self.initializedFirstFrame = True + + return H + + # Run the ECC algorithm. The results are stored in warp_matrix. + # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria) + try: + (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1) + except: + print('Warning: find transform failed. Set warp as identity') + + return H + + def applyFeaures(self, raw_frame, detections=None): + + # Initialize + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3) + + # Downscale image (TODO: consider using pyramids) + if self.downscale > 1.0: + # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + width = width // self.downscale + height = height // self.downscale + + # find the keypoints + mask = np.zeros_like(frame) + # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255 + mask[int(0.02 * height): int(0.98 * height), int(0.02 * width): int(0.98 * width)] = 255 + if detections is not None: + for det in detections: + tlbr = (det[:4] / self.downscale).astype(np.int_) + mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0 + + keypoints = self.detector.detect(frame, mask) + + # compute the descriptors + keypoints, descriptors = self.extractor.compute(frame, keypoints) + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + # Initialization done + self.initializedFirstFrame = True + + return H + + # Match descriptors. 
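+        # k=2 nearest neighbours so the 0.9 ratio test below can reject ambiguous matches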
+ knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2) + + # Filtered matches based on smallest spatial distance + matches = [] + spatialDistances = [] + + maxSpatialDistance = 0.25 * np.array([width, height]) + + # Handle empty matches case + if len(knnMatches) == 0: + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + return H + + for m, n in knnMatches: + if m.distance < 0.9 * n.distance: + prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt + currKeyPointLocation = keypoints[m.trainIdx].pt + + spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0], + prevKeyPointLocation[1] - currKeyPointLocation[1]) + + if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \ + (np.abs(spatialDistance[1]) < maxSpatialDistance[1]): + spatialDistances.append(spatialDistance) + matches.append(m) + + meanSpatialDistances = np.mean(spatialDistances, 0) + stdSpatialDistances = np.std(spatialDistances, 0) + + inliesrs = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances + + goodMatches = [] + prevPoints = [] + currPoints = [] + for i in range(len(matches)): + if inliesrs[i, 0] and inliesrs[i, 1]: + goodMatches.append(matches[i]) + prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt) + currPoints.append(keypoints[matches[i].trainIdx].pt) + + prevPoints = np.array(prevPoints) + currPoints = np.array(currPoints) + + # Draw the keypoint matches on the output image + if 0: + matches_img = np.hstack((self.prevFrame, frame)) + matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR) + W = np.size(self.prevFrame, 1) + for m in goodMatches: + prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_) + curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_) + curr_pt[0] += W + color = np.random.randint(0, 255, (3,)) + color = (int(color[0]), int(color[1]), int(color[2])) + + matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA) + matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1) + matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1) + + plt.figure() + plt.imshow(matches_img) + plt.show() + + # Find rigid matrix + if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): + H, inliesrs = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) + + # Handle downscale + if self.downscale > 1.0: + H[0, 2] *= self.downscale + H[1, 2] *= self.downscale + else: + print('Warning: not enough matching points') + + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + return H + + def applySparseOptFlow(self, raw_frame, detections=None): + + t0 = time.time() + + # Initialize + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3) + + # Downscale image + if self.downscale > 1.0: + # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + + # find the keypoints + keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params) + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + + # Initialization done + self.initializedFirstFrame = True + + return H + + # find correspondences + matchedKeypoints, status, 
err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None) + + # leave good correspondences only + prevPoints = [] + currPoints = [] + + for i in range(len(status)): + if status[i]: + prevPoints.append(self.prevKeyPoints[i]) + currPoints.append(matchedKeypoints[i]) + + prevPoints = np.array(prevPoints) + currPoints = np.array(currPoints) + + # Find rigid matrix + if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): + H, inliesrs = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) + + # Handle downscale + if self.downscale > 1.0: + H[0, 2] *= self.downscale + H[1, 2] *= self.downscale + else: + print('Warning: not enough matching points') + + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + + t1 = time.time() + + # gmc_line = str(1000 * (t1 - t0)) + "\t" + str(H[0, 0]) + "\t" + str(H[0, 1]) + "\t" + str( + # H[0, 2]) + "\t" + str(H[1, 0]) + "\t" + str(H[1, 1]) + "\t" + str(H[1, 2]) + "\n" + # self.gmc_file.write(gmc_line) + + return H + + def applyFile(self, raw_frame, detections=None): + line = self.gmcFile.readline() + tokens = line.split("\t") + H = np.eye(2, 3, dtype=np.float_) + H[0, 0] = float(tokens[1]) + H[0, 1] = float(tokens[2]) + H[0, 2] = float(tokens[3]) + H[1, 0] = float(tokens[4]) + H[1, 1] = float(tokens[5]) + H[1, 2] = float(tokens[6]) + + return H \ No newline at end of file diff --git a/feeder/trackers/botsort/kalman_filter.py b/feeder/trackers/botsort/kalman_filter.py new file mode 100644 index 0000000..02a6eb4 --- /dev/null +++ b/feeder/trackers/botsort/kalman_filter.py @@ -0,0 +1,269 @@ +# vim: expandtab:ts=4:sw=4 +import numpy as np +import scipy.linalg + + +""" +Table for the 0.95 quantile of the chi-square distribution with N degrees of +freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv +function and used as Mahalanobis gating threshold. +""" +chi2inv95 = { + 1: 3.8415, + 2: 5.9915, + 3: 7.8147, + 4: 9.4877, + 5: 11.070, + 6: 12.592, + 7: 14.067, + 8: 15.507, + 9: 16.919} + + +class KalmanFilter(object): + """ + A simple Kalman filter for tracking bounding boxes in image space. + + The 8-dimensional state space + + x, y, w, h, vx, vy, vw, vh + + contains the bounding box center position (x, y), width w, height h, + and their respective velocities. + + Object motion follows a constant velocity model. The bounding box location + (x, y, w, h) is taken as direct observation of the state space (linear + observation model). + + """ + + def __init__(self): + ndim, dt = 4, 1. + + # Create Kalman filter model matrices. + self._motion_mat = np.eye(2 * ndim, 2 * ndim) + for i in range(ndim): + self._motion_mat[i, ndim + i] = dt + self._update_mat = np.eye(ndim, 2 * ndim) + + # Motion and observation uncertainty are chosen relative to the current + # state estimate. These weights control the amount of uncertainty in + # the model. This is a bit hacky. + self._std_weight_position = 1. / 20 + self._std_weight_velocity = 1. / 160 + + def initiate(self, measurement): + """Create track from unassociated measurement. + + Parameters + ---------- + measurement : ndarray + Bounding box coordinates (x, y, w, h) with center position (x, y), + width w, and height h. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector (8 dimensional) and covariance matrix (8x8 + dimensional) of the new track. Unobserved velocities are initialized + to 0 mean. 
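+        The initial standard deviations are scaled by the measured width and height.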
+ + """ + mean_pos = measurement + mean_vel = np.zeros_like(mean_pos) + mean = np.r_[mean_pos, mean_vel] + + std = [ + 2 * self._std_weight_position * measurement[2], + 2 * self._std_weight_position * measurement[3], + 2 * self._std_weight_position * measurement[2], + 2 * self._std_weight_position * measurement[3], + 10 * self._std_weight_velocity * measurement[2], + 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[2], + 10 * self._std_weight_velocity * measurement[3]] + covariance = np.diag(np.square(std)) + return mean, covariance + + def predict(self, mean, covariance): + """Run Kalman filter prediction step. + + Parameters + ---------- + mean : ndarray + The 8 dimensional mean vector of the object state at the previous + time step. + covariance : ndarray + The 8x8 dimensional covariance matrix of the object state at the + previous time step. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + + """ + std_pos = [ + self._std_weight_position * mean[2], + self._std_weight_position * mean[3], + self._std_weight_position * mean[2], + self._std_weight_position * mean[3]] + std_vel = [ + self._std_weight_velocity * mean[2], + self._std_weight_velocity * mean[3], + self._std_weight_velocity * mean[2], + self._std_weight_velocity * mean[3]] + motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) + + mean = np.dot(mean, self._motion_mat.T) + covariance = np.linalg.multi_dot(( + self._motion_mat, covariance, self._motion_mat.T)) + motion_cov + + return mean, covariance + + def project(self, mean, covariance): + """Project state distribution to measurement space. + + Parameters + ---------- + mean : ndarray + The state's mean vector (8 dimensional array). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + + Returns + ------- + (ndarray, ndarray) + Returns the projected mean and covariance matrix of the given state + estimate. + + """ + std = [ + self._std_weight_position * mean[2], + self._std_weight_position * mean[3], + self._std_weight_position * mean[2], + self._std_weight_position * mean[3]] + innovation_cov = np.diag(np.square(std)) + + mean = np.dot(self._update_mat, mean) + covariance = np.linalg.multi_dot(( + self._update_mat, covariance, self._update_mat.T)) + return mean, covariance + innovation_cov + + def multi_predict(self, mean, covariance): + """Run Kalman filter prediction step (Vectorized version). + Parameters + ---------- + mean : ndarray + The Nx8 dimensional mean matrix of the object states at the previous + time step. + covariance : ndarray + The Nx8x8 dimensional covariance matrics of the object states at the + previous time step. + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. 
+ """ + std_pos = [ + self._std_weight_position * mean[:, 2], + self._std_weight_position * mean[:, 3], + self._std_weight_position * mean[:, 2], + self._std_weight_position * mean[:, 3]] + std_vel = [ + self._std_weight_velocity * mean[:, 2], + self._std_weight_velocity * mean[:, 3], + self._std_weight_velocity * mean[:, 2], + self._std_weight_velocity * mean[:, 3]] + sqr = np.square(np.r_[std_pos, std_vel]).T + + motion_cov = [] + for i in range(len(mean)): + motion_cov.append(np.diag(sqr[i])) + motion_cov = np.asarray(motion_cov) + + mean = np.dot(mean, self._motion_mat.T) + left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) + covariance = np.dot(left, self._motion_mat.T) + motion_cov + + return mean, covariance + + def update(self, mean, covariance, measurement): + """Run Kalman filter correction step. + + Parameters + ---------- + mean : ndarray + The predicted state's mean vector (8 dimensional). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + measurement : ndarray + The 4 dimensional measurement vector (x, y, w, h), where (x, y) + is the center position, w the width, and h the height of the + bounding box. + + Returns + ------- + (ndarray, ndarray) + Returns the measurement-corrected state distribution. + + """ + projected_mean, projected_cov = self.project(mean, covariance) + + chol_factor, lower = scipy.linalg.cho_factor( + projected_cov, lower=True, check_finite=False) + kalman_gain = scipy.linalg.cho_solve( + (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, + check_finite=False).T + innovation = measurement - projected_mean + + new_mean = mean + np.dot(innovation, kalman_gain.T) + new_covariance = covariance - np.linalg.multi_dot(( + kalman_gain, projected_cov, kalman_gain.T)) + return new_mean, new_covariance + + def gating_distance(self, mean, covariance, measurements, + only_position=False, metric='maha'): + """Compute gating distance between state distribution and measurements. + A suitable distance threshold can be obtained from `chi2inv95`. If + `only_position` is False, the chi-square distribution has 4 degrees of + freedom, otherwise 2. + Parameters + ---------- + mean : ndarray + Mean vector over the state distribution (8 dimensional). + covariance : ndarray + Covariance of the state distribution (8x8 dimensional). + measurements : ndarray + An Nx4 dimensional matrix of N measurements, each in + format (x, y, a, h) where (x, y) is the bounding box center + position, a the aspect ratio, and h the height. + only_position : Optional[bool] + If True, distance computation is done with respect to the bounding + box center position only. + Returns + ------- + ndarray + Returns an array of length N, where the i-th element contains the + squared Mahalanobis distance between (mean, covariance) and + `measurements[i]`. 
+ """ + mean, covariance = self.project(mean, covariance) + if only_position: + mean, covariance = mean[:2], covariance[:2, :2] + measurements = measurements[:, :2] + + d = measurements - mean + if metric == 'gaussian': + return np.sum(d * d, axis=1) + elif metric == 'maha': + cholesky_factor = np.linalg.cholesky(covariance) + z = scipy.linalg.solve_triangular( + cholesky_factor, d.T, lower=True, check_finite=False, + overwrite_b=True) + squared_maha = np.sum(z * z, axis=0) + return squared_maha + else: + raise ValueError('invalid distance metric') \ No newline at end of file diff --git a/feeder/trackers/botsort/matching.py b/feeder/trackers/botsort/matching.py new file mode 100644 index 0000000..756dd45 --- /dev/null +++ b/feeder/trackers/botsort/matching.py @@ -0,0 +1,234 @@ +import numpy as np +import scipy +import lap +from scipy.spatial.distance import cdist + +from trackers.botsort import kalman_filter + + +def merge_matches(m1, m2, shape): + O,P,Q = shape + m1 = np.asarray(m1) + m2 = np.asarray(m2) + + M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P)) + M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q)) + + mask = M1*M2 + match = mask.nonzero() + match = list(zip(match[0], match[1])) + unmatched_O = tuple(set(range(O)) - set([i for i, j in match])) + unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match])) + + return match, unmatched_O, unmatched_Q + + +def _indices_to_matches(cost_matrix, indices, thresh): + matched_cost = cost_matrix[tuple(zip(*indices))] + matched_mask = (matched_cost <= thresh) + + matches = indices[matched_mask] + unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0])) + unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1])) + + return matches, unmatched_a, unmatched_b + + +def linear_assignment(cost_matrix, thresh): + if cost_matrix.size == 0: + return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) + matches, unmatched_a, unmatched_b = [], [], [] + cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) + for ix, mx in enumerate(x): + if mx >= 0: + matches.append([ix, mx]) + unmatched_a = np.where(x < 0)[0] + unmatched_b = np.where(y < 0)[0] + matches = np.asarray(matches) + return matches, unmatched_a, unmatched_b + + +def ious(atlbrs, btlbrs): + """ + Compute cost based on IoU + :type atlbrs: list[tlbr] | np.ndarray + :type atlbrs: list[tlbr] | np.ndarray + + :rtype ious np.ndarray + """ + ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32) + if ious.size == 0: + return ious + + ious = bbox_ious( + np.ascontiguousarray(atlbrs, dtype=np.float32), + np.ascontiguousarray(btlbrs, dtype=np.float32) + ) + + return ious + + +def tlbr_expand(tlbr, scale=1.2): + w = tlbr[2] - tlbr[0] + h = tlbr[3] - tlbr[1] + + half_scale = 0.5 * scale + + tlbr[0] -= half_scale * w + tlbr[1] -= half_scale * h + tlbr[2] += half_scale * w + tlbr[3] += half_scale * h + + return tlbr + + +def iou_distance(atracks, btracks): + """ + Compute cost based on IoU + :type atracks: list[STrack] + :type btracks: list[STrack] + + :rtype cost_matrix np.ndarray + """ + + if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + atlbrs = atracks + btlbrs = btracks + else: + atlbrs = [track.tlbr for track in atracks] + btlbrs = [track.tlbr for track in btracks] + _ious = ious(atlbrs, btlbrs) + cost_matrix = 1 - _ious + + return cost_matrix + + 
+def v_iou_distance(atracks, btracks): + """ + Compute cost based on IoU + :type atracks: list[STrack] + :type btracks: list[STrack] + + :rtype cost_matrix np.ndarray + """ + + if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + atlbrs = atracks + btlbrs = btracks + else: + atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks] + btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks] + _ious = ious(atlbrs, btlbrs) + cost_matrix = 1 - _ious + + return cost_matrix + + +def embedding_distance(tracks, detections, metric='cosine'): + """ + :param tracks: list[STrack] + :param detections: list[BaseTrack] + :param metric: + :return: cost_matrix np.ndarray + """ + + cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32) + if cost_matrix.size == 0: + return cost_matrix + det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32) + track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32) + + cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # / 2.0 # Nomalized features + return cost_matrix + + +def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False): + if cost_matrix.size == 0: + return cost_matrix + gating_dim = 2 if only_position else 4 + gating_threshold = kalman_filter.chi2inv95[gating_dim] + # measurements = np.asarray([det.to_xyah() for det in detections]) + measurements = np.asarray([det.to_xywh() for det in detections]) + for row, track in enumerate(tracks): + gating_distance = kf.gating_distance( + track.mean, track.covariance, measurements, only_position) + cost_matrix[row, gating_distance > gating_threshold] = np.inf + return cost_matrix + + +def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98): + if cost_matrix.size == 0: + return cost_matrix + gating_dim = 2 if only_position else 4 + gating_threshold = kalman_filter.chi2inv95[gating_dim] + # measurements = np.asarray([det.to_xyah() for det in detections]) + measurements = np.asarray([det.to_xywh() for det in detections]) + for row, track in enumerate(tracks): + gating_distance = kf.gating_distance( + track.mean, track.covariance, measurements, only_position, metric='maha') + cost_matrix[row, gating_distance > gating_threshold] = np.inf + cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance + return cost_matrix + + +def fuse_iou(cost_matrix, tracks, detections): + if cost_matrix.size == 0: + return cost_matrix + reid_sim = 1 - cost_matrix + iou_dist = iou_distance(tracks, detections) + iou_sim = 1 - iou_dist + fuse_sim = reid_sim * (1 + iou_sim) / 2 + det_scores = np.array([det.score for det in detections]) + det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) + #fuse_sim = fuse_sim * (1 + det_scores) / 2 + fuse_cost = 1 - fuse_sim + return fuse_cost + + +def fuse_score(cost_matrix, detections): + if cost_matrix.size == 0: + return cost_matrix + iou_sim = 1 - cost_matrix + det_scores = np.array([det.score for det in detections]) + det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) + fuse_sim = iou_sim * det_scores + fuse_cost = 1 - fuse_sim + return fuse_cost + +def bbox_ious(boxes, query_boxes): + """ + Parameters + ---------- + boxes: (N, 4) ndarray of float + query_boxes: (K, 4) ndarray of float + Returns + ------- + overlaps: (N, K) ndarray of overlap between boxes and query_boxes + """ + N 
= boxes.shape[0] + K = query_boxes.shape[0] + overlaps = np.zeros((N, K), dtype=np.float32) + + for k in range(K): + box_area = ( + (query_boxes[k, 2] - query_boxes[k, 0] + 1) * + (query_boxes[k, 3] - query_boxes[k, 1] + 1) + ) + for n in range(N): + iw = ( + min(boxes[n, 2], query_boxes[k, 2]) - + max(boxes[n, 0], query_boxes[k, 0]) + 1 + ) + if iw > 0: + ih = ( + min(boxes[n, 3], query_boxes[k, 3]) - + max(boxes[n, 1], query_boxes[k, 1]) + 1 + ) + if ih > 0: + ua = float( + (boxes[n, 2] - boxes[n, 0] + 1) * + (boxes[n, 3] - boxes[n, 1] + 1) + + box_area - iw * ih + ) + overlaps[n, k] = iw * ih / ua + return overlaps \ No newline at end of file diff --git a/feeder/trackers/bytetrack/basetrack.py b/feeder/trackers/bytetrack/basetrack.py new file mode 100644 index 0000000..4fe2233 --- /dev/null +++ b/feeder/trackers/bytetrack/basetrack.py @@ -0,0 +1,52 @@ +import numpy as np +from collections import OrderedDict + + +class TrackState(object): + New = 0 + Tracked = 1 + Lost = 2 + Removed = 3 + + +class BaseTrack(object): + _count = 0 + + track_id = 0 + is_activated = False + state = TrackState.New + + history = OrderedDict() + features = [] + curr_feature = None + score = 0 + start_frame = 0 + frame_id = 0 + time_since_update = 0 + + # multi-camera + location = (np.inf, np.inf) + + @property + def end_frame(self): + return self.frame_id + + @staticmethod + def next_id(): + BaseTrack._count += 1 + return BaseTrack._count + + def activate(self, *args): + raise NotImplementedError + + def predict(self): + raise NotImplementedError + + def update(self, *args, **kwargs): + raise NotImplementedError + + def mark_lost(self): + self.state = TrackState.Lost + + def mark_removed(self): + self.state = TrackState.Removed diff --git a/feeder/trackers/bytetrack/byte_tracker.py b/feeder/trackers/bytetrack/byte_tracker.py new file mode 100644 index 0000000..e74afe4 --- /dev/null +++ b/feeder/trackers/bytetrack/byte_tracker.py @@ -0,0 +1,348 @@ +import numpy as np + +from ultralytics.yolo.utils.ops import xywh2xyxy, xyxy2xywh + + +from trackers.bytetrack.kalman_filter import KalmanFilter +from trackers.bytetrack import matching +from trackers.bytetrack.basetrack import BaseTrack, TrackState + +class STrack(BaseTrack): + shared_kalman = KalmanFilter() + def __init__(self, tlwh, score, cls): + + # wait activate + self._tlwh = np.asarray(tlwh, dtype=np.float32) + self.kalman_filter = None + self.mean, self.covariance = None, None + self.is_activated = False + + self.score = score + self.tracklet_len = 0 + self.cls = cls + + def predict(self): + mean_state = self.mean.copy() + if self.state != TrackState.Tracked: + mean_state[7] = 0 + self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) + + @staticmethod + def multi_predict(stracks): + if len(stracks) > 0: + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + for i, st in enumerate(stracks): + if st.state != TrackState.Tracked: + multi_mean[i][7] = 0 + multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance) + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + stracks[i].mean = mean + stracks[i].covariance = cov + + def activate(self, kalman_filter, frame_id): + """Start a new tracklet""" + self.kalman_filter = kalman_filter + self.track_id = self.next_id() + self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh)) + + self.tracklet_len = 0 + self.state = 
TrackState.Tracked + if frame_id == 1: + self.is_activated = True + # self.is_activated = True + self.frame_id = frame_id + self.start_frame = frame_id + + def re_activate(self, new_track, frame_id, new_id=False): + self.mean, self.covariance = self.kalman_filter.update( + self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh) + ) + self.tracklet_len = 0 + self.state = TrackState.Tracked + self.is_activated = True + self.frame_id = frame_id + if new_id: + self.track_id = self.next_id() + self.score = new_track.score + self.cls = new_track.cls + + def update(self, new_track, frame_id): + """ + Update a matched track + :type new_track: STrack + :type frame_id: int + :type update_feature: bool + :return: + """ + self.frame_id = frame_id + self.tracklet_len += 1 + # self.cls = cls + + new_tlwh = new_track.tlwh + self.mean, self.covariance = self.kalman_filter.update( + self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)) + self.state = TrackState.Tracked + self.is_activated = True + + self.score = new_track.score + + @property + # @jit(nopython=True) + def tlwh(self): + """Get current position in bounding box format `(top left x, top left y, + width, height)`. + """ + if self.mean is None: + return self._tlwh.copy() + ret = self.mean[:4].copy() + ret[2] *= ret[3] + ret[:2] -= ret[2:] / 2 + return ret + + @property + # @jit(nopython=True) + def tlbr(self): + """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., + `(top left, bottom right)`. + """ + ret = self.tlwh.copy() + ret[2:] += ret[:2] + return ret + + @staticmethod + # @jit(nopython=True) + def tlwh_to_xyah(tlwh): + """Convert bounding box to format `(center x, center y, aspect ratio, + height)`, where the aspect ratio is `width / height`. + """ + ret = np.asarray(tlwh).copy() + ret[:2] += ret[2:] / 2 + ret[2] /= ret[3] + return ret + + def to_xyah(self): + return self.tlwh_to_xyah(self.tlwh) + + @staticmethod + # @jit(nopython=True) + def tlbr_to_tlwh(tlbr): + ret = np.asarray(tlbr).copy() + ret[2:] -= ret[:2] + return ret + + @staticmethod + # @jit(nopython=True) + def tlwh_to_tlbr(tlwh): + ret = np.asarray(tlwh).copy() + ret[2:] += ret[:2] + return ret + + def __repr__(self): + return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame) + + +class BYTETracker(object): + def __init__(self, track_thresh=0.45, match_thresh=0.8, track_buffer=25, frame_rate=30): + self.tracked_stracks = [] # type: list[STrack] + self.lost_stracks = [] # type: list[STrack] + self.removed_stracks = [] # type: list[STrack] + + self.frame_id = 0 + self.track_buffer=track_buffer + + self.track_thresh = track_thresh + self.match_thresh = match_thresh + self.det_thresh = track_thresh + 0.1 + self.buffer_size = int(frame_rate / 30.0 * track_buffer) + self.max_time_lost = self.buffer_size + self.kalman_filter = KalmanFilter() + + def update(self, dets, _): + self.frame_id += 1 + activated_starcks = [] + refind_stracks = [] + lost_stracks = [] + removed_stracks = [] + + xyxys = dets[:, 0:4] + xywh = xyxy2xywh(xyxys.numpy()) + confs = dets[:, 4] + clss = dets[:, 5] + + classes = clss.numpy() + xyxys = xyxys.numpy() + confs = confs.numpy() + + remain_inds = confs > self.track_thresh + inds_low = confs > 0.1 + inds_high = confs < self.track_thresh + + inds_second = np.logical_and(inds_low, inds_high) + + dets_second = xywh[inds_second] + dets = xywh[remain_inds] + + scores_keep = confs[remain_inds] + scores_second = confs[inds_second] + + clss_keep = classes[remain_inds] + clss_second = classes[inds_second] + + + if 
len(dets) > 0: + '''Detections''' + detections = [STrack(xyxy, s, c) for + (xyxy, s, c) in zip(dets, scores_keep, clss_keep)] + else: + detections = [] + + ''' Add newly detected tracklets to tracked_stracks''' + unconfirmed = [] + tracked_stracks = [] # type: list[STrack] + for track in self.tracked_stracks: + if not track.is_activated: + unconfirmed.append(track) + else: + tracked_stracks.append(track) + + ''' Step 2: First association, with high score detection boxes''' + strack_pool = joint_stracks(tracked_stracks, self.lost_stracks) + # Predict the current location with KF + STrack.multi_predict(strack_pool) + dists = matching.iou_distance(strack_pool, detections) + #if not self.args.mot20: + dists = matching.fuse_score(dists, detections) + matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.match_thresh) + + for itracked, idet in matches: + track = strack_pool[itracked] + det = detections[idet] + if track.state == TrackState.Tracked: + track.update(detections[idet], self.frame_id) + activated_starcks.append(track) + else: + track.re_activate(det, self.frame_id, new_id=False) + refind_stracks.append(track) + + ''' Step 3: Second association, with low score detection boxes''' + # association the untrack to the low score detections + if len(dets_second) > 0: + '''Detections''' + detections_second = [STrack(xywh, s, c) for (xywh, s, c) in zip(dets_second, scores_second, clss_second)] + else: + detections_second = [] + r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] + dists = matching.iou_distance(r_tracked_stracks, detections_second) + matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5) + for itracked, idet in matches: + track = r_tracked_stracks[itracked] + det = detections_second[idet] + if track.state == TrackState.Tracked: + track.update(det, self.frame_id) + activated_starcks.append(track) + else: + track.re_activate(det, self.frame_id, new_id=False) + refind_stracks.append(track) + + for it in u_track: + track = r_tracked_stracks[it] + if not track.state == TrackState.Lost: + track.mark_lost() + lost_stracks.append(track) + + '''Deal with unconfirmed tracks, usually tracks with only one beginning frame''' + detections = [detections[i] for i in u_detection] + dists = matching.iou_distance(unconfirmed, detections) + #if not self.args.mot20: + dists = matching.fuse_score(dists, detections) + matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7) + for itracked, idet in matches: + unconfirmed[itracked].update(detections[idet], self.frame_id) + activated_starcks.append(unconfirmed[itracked]) + for it in u_unconfirmed: + track = unconfirmed[it] + track.mark_removed() + removed_stracks.append(track) + + """ Step 4: Init new stracks""" + for inew in u_detection: + track = detections[inew] + if track.score < self.det_thresh: + continue + track.activate(self.kalman_filter, self.frame_id) + activated_starcks.append(track) + """ Step 5: Update state""" + for track in self.lost_stracks: + if self.frame_id - track.end_frame > self.max_time_lost: + track.mark_removed() + removed_stracks.append(track) + + # print('Ramained match {} s'.format(t4-t3)) + + self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked] + self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks) + self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks) + self.lost_stracks = sub_stracks(self.lost_stracks, 
self.tracked_stracks) + self.lost_stracks.extend(lost_stracks) + self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks) + self.removed_stracks.extend(removed_stracks) + self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks) + # get scores of lost tracks + output_stracks = [track for track in self.tracked_stracks if track.is_activated] + outputs = [] + for t in output_stracks: + output= [] + tlwh = t.tlwh + tid = t.track_id + tlwh = np.expand_dims(tlwh, axis=0) + xyxy = xywh2xyxy(tlwh) + xyxy = np.squeeze(xyxy, axis=0) + output.extend(xyxy) + output.append(tid) + output.append(t.cls) + output.append(t.score) + outputs.append(output) + + return outputs +#track_id, class_id, conf + +def joint_stracks(tlista, tlistb): + exists = {} + res = [] + for t in tlista: + exists[t.track_id] = 1 + res.append(t) + for t in tlistb: + tid = t.track_id + if not exists.get(tid, 0): + exists[tid] = 1 + res.append(t) + return res + + +def sub_stracks(tlista, tlistb): + stracks = {} + for t in tlista: + stracks[t.track_id] = t + for t in tlistb: + tid = t.track_id + if stracks.get(tid, 0): + del stracks[tid] + return list(stracks.values()) + + +def remove_duplicate_stracks(stracksa, stracksb): + pdist = matching.iou_distance(stracksa, stracksb) + pairs = np.where(pdist < 0.15) + dupa, dupb = list(), list() + for p, q in zip(*pairs): + timep = stracksa[p].frame_id - stracksa[p].start_frame + timeq = stracksb[q].frame_id - stracksb[q].start_frame + if timep > timeq: + dupb.append(q) + else: + dupa.append(p) + resa = [t for i, t in enumerate(stracksa) if not i in dupa] + resb = [t for i, t in enumerate(stracksb) if not i in dupb] + return resa, resb diff --git a/feeder/trackers/bytetrack/configs/bytetrack.yaml b/feeder/trackers/bytetrack/configs/bytetrack.yaml new file mode 100644 index 0000000..e81dd78 --- /dev/null +++ b/feeder/trackers/bytetrack/configs/bytetrack.yaml @@ -0,0 +1,7 @@ +bytetrack: + track_thresh: 0.6 # tracking confidence threshold + track_buffer: 30 # the frames for keep lost tracks + match_thresh: 0.8 # matching threshold for tracking + frame_rate: 30 # FPS + conf_thres: 0.5122620708221085 + diff --git a/feeder/trackers/bytetrack/kalman_filter.py b/feeder/trackers/bytetrack/kalman_filter.py new file mode 100644 index 0000000..deda8a2 --- /dev/null +++ b/feeder/trackers/bytetrack/kalman_filter.py @@ -0,0 +1,270 @@ +# vim: expandtab:ts=4:sw=4 +import numpy as np +import scipy.linalg + + +""" +Table for the 0.95 quantile of the chi-square distribution with N degrees of +freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv +function and used as Mahalanobis gating threshold. +""" +chi2inv95 = { + 1: 3.8415, + 2: 5.9915, + 3: 7.8147, + 4: 9.4877, + 5: 11.070, + 6: 12.592, + 7: 14.067, + 8: 15.507, + 9: 16.919} + + +class KalmanFilter(object): + """ + A simple Kalman filter for tracking bounding boxes in image space. + + The 8-dimensional state space + + x, y, a, h, vx, vy, va, vh + + contains the bounding box center position (x, y), aspect ratio a, height h, + and their respective velocities. + + Object motion follows a constant velocity model. The bounding box location + (x, y, a, h) is taken as direct observation of the state space (linear + observation model). + + """ + + def __init__(self): + ndim, dt = 4, 1. + + # Create Kalman filter model matrices. 
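+        # Constant-velocity model on the 8-dim state (x, y, a, h, vx, vy, va, vh):
+        # the transition matrix F built below is the block form [[I4, dt*I4], [0, I4]],
+        # and the observation matrix H = [I4, 0] projects the state back onto the
+        # measured (x, y, a, h) box.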
+ self._motion_mat = np.eye(2 * ndim, 2 * ndim) + for i in range(ndim): + self._motion_mat[i, ndim + i] = dt + self._update_mat = np.eye(ndim, 2 * ndim) + + # Motion and observation uncertainty are chosen relative to the current + # state estimate. These weights control the amount of uncertainty in + # the model. This is a bit hacky. + self._std_weight_position = 1. / 20 + self._std_weight_velocity = 1. / 160 + + def initiate(self, measurement): + """Create track from unassociated measurement. + + Parameters + ---------- + measurement : ndarray + Bounding box coordinates (x, y, a, h) with center position (x, y), + aspect ratio a, and height h. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector (8 dimensional) and covariance matrix (8x8 + dimensional) of the new track. Unobserved velocities are initialized + to 0 mean. + + """ + mean_pos = measurement + mean_vel = np.zeros_like(mean_pos) + mean = np.r_[mean_pos, mean_vel] + + std = [ + 2 * self._std_weight_position * measurement[3], + 2 * self._std_weight_position * measurement[3], + 1e-2, + 2 * self._std_weight_position * measurement[3], + 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[3], + 1e-5, + 10 * self._std_weight_velocity * measurement[3]] + covariance = np.diag(np.square(std)) + return mean, covariance + + def predict(self, mean, covariance): + """Run Kalman filter prediction step. + + Parameters + ---------- + mean : ndarray + The 8 dimensional mean vector of the object state at the previous + time step. + covariance : ndarray + The 8x8 dimensional covariance matrix of the object state at the + previous time step. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + + """ + std_pos = [ + self._std_weight_position * mean[3], + self._std_weight_position * mean[3], + 1e-2, + self._std_weight_position * mean[3]] + std_vel = [ + self._std_weight_velocity * mean[3], + self._std_weight_velocity * mean[3], + 1e-5, + self._std_weight_velocity * mean[3]] + motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) + + #mean = np.dot(self._motion_mat, mean) + mean = np.dot(mean, self._motion_mat.T) + covariance = np.linalg.multi_dot(( + self._motion_mat, covariance, self._motion_mat.T)) + motion_cov + + return mean, covariance + + def project(self, mean, covariance): + """Project state distribution to measurement space. + + Parameters + ---------- + mean : ndarray + The state's mean vector (8 dimensional array). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + + Returns + ------- + (ndarray, ndarray) + Returns the projected mean and covariance matrix of the given state + estimate. + + """ + std = [ + self._std_weight_position * mean[3], + self._std_weight_position * mean[3], + 1e-1, + self._std_weight_position * mean[3]] + innovation_cov = np.diag(np.square(std)) + + mean = np.dot(self._update_mat, mean) + covariance = np.linalg.multi_dot(( + self._update_mat, covariance, self._update_mat.T)) + return mean, covariance + innovation_cov + + def multi_predict(self, mean, covariance): + """Run Kalman filter prediction step (Vectorized version). + Parameters + ---------- + mean : ndarray + The Nx8 dimensional mean matrix of the object states at the previous + time step. + covariance : ndarray + The Nx8x8 dimensional covariance matrics of the object states at the + previous time step. 
+ Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + """ + std_pos = [ + self._std_weight_position * mean[:, 3], + self._std_weight_position * mean[:, 3], + 1e-2 * np.ones_like(mean[:, 3]), + self._std_weight_position * mean[:, 3]] + std_vel = [ + self._std_weight_velocity * mean[:, 3], + self._std_weight_velocity * mean[:, 3], + 1e-5 * np.ones_like(mean[:, 3]), + self._std_weight_velocity * mean[:, 3]] + sqr = np.square(np.r_[std_pos, std_vel]).T + + motion_cov = [] + for i in range(len(mean)): + motion_cov.append(np.diag(sqr[i])) + motion_cov = np.asarray(motion_cov) + + mean = np.dot(mean, self._motion_mat.T) + left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) + covariance = np.dot(left, self._motion_mat.T) + motion_cov + + return mean, covariance + + def update(self, mean, covariance, measurement): + """Run Kalman filter correction step. + + Parameters + ---------- + mean : ndarray + The predicted state's mean vector (8 dimensional). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + measurement : ndarray + The 4 dimensional measurement vector (x, y, a, h), where (x, y) + is the center position, a the aspect ratio, and h the height of the + bounding box. + + Returns + ------- + (ndarray, ndarray) + Returns the measurement-corrected state distribution. + + """ + projected_mean, projected_cov = self.project(mean, covariance) + + chol_factor, lower = scipy.linalg.cho_factor( + projected_cov, lower=True, check_finite=False) + kalman_gain = scipy.linalg.cho_solve( + (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, + check_finite=False).T + innovation = measurement - projected_mean + + new_mean = mean + np.dot(innovation, kalman_gain.T) + new_covariance = covariance - np.linalg.multi_dot(( + kalman_gain, projected_cov, kalman_gain.T)) + return new_mean, new_covariance + + def gating_distance(self, mean, covariance, measurements, + only_position=False, metric='maha'): + """Compute gating distance between state distribution and measurements. + A suitable distance threshold can be obtained from `chi2inv95`. If + `only_position` is False, the chi-square distribution has 4 degrees of + freedom, otherwise 2. + Parameters + ---------- + mean : ndarray + Mean vector over the state distribution (8 dimensional). + covariance : ndarray + Covariance of the state distribution (8x8 dimensional). + measurements : ndarray + An Nx4 dimensional matrix of N measurements, each in + format (x, y, a, h) where (x, y) is the bounding box center + position, a the aspect ratio, and h the height. + only_position : Optional[bool] + If True, distance computation is done with respect to the bounding + box center position only. + Returns + ------- + ndarray + Returns an array of length N, where the i-th element contains the + squared Mahalanobis distance between (mean, covariance) and + `measurements[i]`. 
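+
+        Notes
+        -----
+        The distances are typically gated against the 0.95 chi-square
+        quantile for the matching degrees of freedom, as done in
+        `matching.gate_cost_matrix`::
+
+            gating_distance = kf.gating_distance(track.mean, track.covariance, measurements)
+            cost_matrix[row, gating_distance > chi2inv95[4]] = np.inf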
+ """ + mean, covariance = self.project(mean, covariance) + if only_position: + mean, covariance = mean[:2], covariance[:2, :2] + measurements = measurements[:, :2] + + d = measurements - mean + if metric == 'gaussian': + return np.sum(d * d, axis=1) + elif metric == 'maha': + cholesky_factor = np.linalg.cholesky(covariance) + z = scipy.linalg.solve_triangular( + cholesky_factor, d.T, lower=True, check_finite=False, + overwrite_b=True) + squared_maha = np.sum(z * z, axis=0) + return squared_maha + else: + raise ValueError('invalid distance metric') \ No newline at end of file diff --git a/feeder/trackers/bytetrack/matching.py b/feeder/trackers/bytetrack/matching.py new file mode 100644 index 0000000..17d7498 --- /dev/null +++ b/feeder/trackers/bytetrack/matching.py @@ -0,0 +1,219 @@ +import cv2 +import numpy as np +import scipy +import lap +from scipy.spatial.distance import cdist + +from trackers.bytetrack import kalman_filter +import time + +def merge_matches(m1, m2, shape): + O,P,Q = shape + m1 = np.asarray(m1) + m2 = np.asarray(m2) + + M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P)) + M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q)) + + mask = M1*M2 + match = mask.nonzero() + match = list(zip(match[0], match[1])) + unmatched_O = tuple(set(range(O)) - set([i for i, j in match])) + unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match])) + + return match, unmatched_O, unmatched_Q + + +def _indices_to_matches(cost_matrix, indices, thresh): + matched_cost = cost_matrix[tuple(zip(*indices))] + matched_mask = (matched_cost <= thresh) + + matches = indices[matched_mask] + unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0])) + unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1])) + + return matches, unmatched_a, unmatched_b + + +def linear_assignment(cost_matrix, thresh): + if cost_matrix.size == 0: + return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) + matches, unmatched_a, unmatched_b = [], [], [] + cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) + for ix, mx in enumerate(x): + if mx >= 0: + matches.append([ix, mx]) + unmatched_a = np.where(x < 0)[0] + unmatched_b = np.where(y < 0)[0] + matches = np.asarray(matches) + return matches, unmatched_a, unmatched_b + + +def ious(atlbrs, btlbrs): + """ + Compute cost based on IoU + :type atlbrs: list[tlbr] | np.ndarray + :type atlbrs: list[tlbr] | np.ndarray + + :rtype ious np.ndarray + """ + ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32) + if ious.size == 0: + return ious + + ious = bbox_ious( + np.ascontiguousarray(atlbrs, dtype=np.float32), + np.ascontiguousarray(btlbrs, dtype=np.float32) + ) + + return ious + + +def iou_distance(atracks, btracks): + """ + Compute cost based on IoU + :type atracks: list[STrack] + :type btracks: list[STrack] + + :rtype cost_matrix np.ndarray + """ + + if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + atlbrs = atracks + btlbrs = btracks + else: + atlbrs = [track.tlbr for track in atracks] + btlbrs = [track.tlbr for track in btracks] + _ious = ious(atlbrs, btlbrs) + cost_matrix = 1 - _ious + + return cost_matrix + +def v_iou_distance(atracks, btracks): + """ + Compute cost based on IoU + :type atracks: list[STrack] + :type btracks: list[STrack] + + :rtype cost_matrix np.ndarray + """ + + if (len(atracks)>0 and 
isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + atlbrs = atracks + btlbrs = btracks + else: + atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks] + btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks] + _ious = ious(atlbrs, btlbrs) + cost_matrix = 1 - _ious + + return cost_matrix + +def embedding_distance(tracks, detections, metric='cosine'): + """ + :param tracks: list[STrack] + :param detections: list[BaseTrack] + :param metric: + :return: cost_matrix np.ndarray + """ + + cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32) + if cost_matrix.size == 0: + return cost_matrix + det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32) + #for i, track in enumerate(tracks): + #cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric)) + track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32) + cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Nomalized features + return cost_matrix + + +def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False): + if cost_matrix.size == 0: + return cost_matrix + gating_dim = 2 if only_position else 4 + gating_threshold = kalman_filter.chi2inv95[gating_dim] + measurements = np.asarray([det.to_xyah() for det in detections]) + for row, track in enumerate(tracks): + gating_distance = kf.gating_distance( + track.mean, track.covariance, measurements, only_position) + cost_matrix[row, gating_distance > gating_threshold] = np.inf + return cost_matrix + + +def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98): + if cost_matrix.size == 0: + return cost_matrix + gating_dim = 2 if only_position else 4 + gating_threshold = kalman_filter.chi2inv95[gating_dim] + measurements = np.asarray([det.to_xyah() for det in detections]) + for row, track in enumerate(tracks): + gating_distance = kf.gating_distance( + track.mean, track.covariance, measurements, only_position, metric='maha') + cost_matrix[row, gating_distance > gating_threshold] = np.inf + cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance + return cost_matrix + + +def fuse_iou(cost_matrix, tracks, detections): + if cost_matrix.size == 0: + return cost_matrix + reid_sim = 1 - cost_matrix + iou_dist = iou_distance(tracks, detections) + iou_sim = 1 - iou_dist + fuse_sim = reid_sim * (1 + iou_sim) / 2 + det_scores = np.array([det.score for det in detections]) + det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) + #fuse_sim = fuse_sim * (1 + det_scores) / 2 + fuse_cost = 1 - fuse_sim + return fuse_cost + + +def fuse_score(cost_matrix, detections): + if cost_matrix.size == 0: + return cost_matrix + iou_sim = 1 - cost_matrix + det_scores = np.array([det.score for det in detections]) + det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) + fuse_sim = iou_sim * det_scores + fuse_cost = 1 - fuse_sim + return fuse_cost + + +def bbox_ious(boxes, query_boxes): + """ + Parameters + ---------- + boxes: (N, 4) ndarray of float + query_boxes: (K, 4) ndarray of float + Returns + ------- + overlaps: (N, K) ndarray of overlap between boxes and query_boxes + """ + N = boxes.shape[0] + K = query_boxes.shape[0] + overlaps = np.zeros((N, K), dtype=np.float32) + + for k in range(K): + box_area = ( + (query_boxes[k, 2] - query_boxes[k, 0] + 1) * + (query_boxes[k, 3] - 
query_boxes[k, 1] + 1) + ) + for n in range(N): + iw = ( + min(boxes[n, 2], query_boxes[k, 2]) - + max(boxes[n, 0], query_boxes[k, 0]) + 1 + ) + if iw > 0: + ih = ( + min(boxes[n, 3], query_boxes[k, 3]) - + max(boxes[n, 1], query_boxes[k, 1]) + 1 + ) + if ih > 0: + ua = float( + (boxes[n, 2] - boxes[n, 0] + 1) * + (boxes[n, 3] - boxes[n, 1] + 1) + + box_area - iw * ih + ) + overlaps[n, k] = iw * ih / ua + return overlaps \ No newline at end of file diff --git a/feeder/trackers/deepocsort/__init__.py b/feeder/trackers/deepocsort/__init__.py new file mode 100644 index 0000000..0c53de6 --- /dev/null +++ b/feeder/trackers/deepocsort/__init__.py @@ -0,0 +1,2 @@ +from . import args +from . import ocsort diff --git a/feeder/trackers/deepocsort/args.py b/feeder/trackers/deepocsort/args.py new file mode 100644 index 0000000..cfd34cc --- /dev/null +++ b/feeder/trackers/deepocsort/args.py @@ -0,0 +1,110 @@ +import argparse + + +def make_parser(): + parser = argparse.ArgumentParser("OC-SORT parameters") + + # distributed + parser.add_argument("-b", "--batch-size", type=int, default=1, help="batch size") + parser.add_argument("-d", "--devices", default=None, type=int, help="device for training") + + parser.add_argument("--local_rank", default=0, type=int, help="local rank for dist training") + parser.add_argument("--num_machines", default=1, type=int, help="num of node for training") + parser.add_argument("--machine_rank", default=0, type=int, help="node rank for multi-node training") + + parser.add_argument( + "-f", + "--exp_file", + default=None, + type=str, + help="pls input your expriment description file", + ) + parser.add_argument( + "--test", + dest="test", + default=False, + action="store_true", + help="Evaluating on test-dev set.", + ) + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + + # det args + parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval") + parser.add_argument("--conf", default=0.1, type=float, help="test conf") + parser.add_argument("--nms", default=0.7, type=float, help="test nms threshold") + parser.add_argument("--tsize", default=[800, 1440], nargs="+", type=int, help="test img size") + parser.add_argument("--seed", default=None, type=int, help="eval seed") + + # tracking args + parser.add_argument("--track_thresh", type=float, default=0.6, help="detection confidence threshold") + parser.add_argument( + "--iou_thresh", + type=float, + default=0.3, + help="the iou threshold in Sort for matching", + ) + parser.add_argument("--min_hits", type=int, default=3, help="min hits to create track in SORT") + parser.add_argument( + "--inertia", + type=float, + default=0.2, + help="the weight of VDC term in cost matrix", + ) + parser.add_argument( + "--deltat", + type=int, + default=3, + help="time step difference to estimate direction", + ) + parser.add_argument("--track_buffer", type=int, default=30, help="the frames for keep lost tracks") + parser.add_argument( + "--match_thresh", + type=float, + default=0.9, + help="matching threshold for tracking", + ) + parser.add_argument( + "--gt-type", + type=str, + default="_val_half", + help="suffix to find the gt annotation", + ) + parser.add_argument("--public", action="store_true", help="use public detection") + parser.add_argument("--asso", default="iou", help="similarity function: iou/giou/diou/ciou/ctdis") + + # for kitti/bdd100k inference with public detections + parser.add_argument( + "--raw_results_path", + type=str, + 
default="exps/permatrack_kitti_test/", + help="path to the raw tracking results from other tracks", + ) + parser.add_argument("--out_path", type=str, help="path to save output results") + parser.add_argument( + "--hp", + action="store_true", + help="use head padding to add the missing objects during \ + initializing the tracks (offline).", + ) + + # for demo video + parser.add_argument("--demo_type", default="image", help="demo type, eg. image, video and webcam") + parser.add_argument("--path", default="./videos/demo.mp4", help="path to images or video") + parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id") + parser.add_argument( + "--save_result", + action="store_true", + help="whether to save the inference result of image/video", + ) + parser.add_argument( + "--device", + default="gpu", + type=str, + help="device to run our model, can either be cpu or gpu", + ) + return parser diff --git a/feeder/trackers/deepocsort/association.py b/feeder/trackers/deepocsort/association.py new file mode 100644 index 0000000..a84c296 --- /dev/null +++ b/feeder/trackers/deepocsort/association.py @@ -0,0 +1,445 @@ +import os +import pdb + +import numpy as np +from scipy.special import softmax + + +def iou_batch(bboxes1, bboxes2): + """ + From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2] + """ + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) + yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) + xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) + yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) + w = np.maximum(0.0, xx2 - xx1) + h = np.maximum(0.0, yy2 - yy1) + wh = w * h + o = wh / ( + (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) + - wh + ) + return o + + +def giou_batch(bboxes1, bboxes2): + """ + :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) + :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) + :return: + """ + # for details should go to https://arxiv.org/pdf/1902.09630.pdf + # ensure predict's bbox form + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) + yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) + xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) + yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) + w = np.maximum(0.0, xx2 - xx1) + h = np.maximum(0.0, yy2 - yy1) + wh = w * h + iou = wh / ( + (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) + - wh + ) + + xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) + yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) + xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) + yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) + wc = xxc2 - xxc1 + hc = yyc2 - yyc1 + assert (wc > 0).all() and (hc > 0).all() + area_enclose = wc * hc + giou = iou - (area_enclose - wh) / area_enclose + giou = (giou + 1.0) / 2.0 # resize from (-1,1) to (0,1) + return giou + + +def diou_batch(bboxes1, bboxes2): + """ + :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) + :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) + :return: + """ + # for details should go to https://arxiv.org/pdf/1902.09630.pdf + # ensure predict's bbox form + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + # calculate the intersection box + xx1 
= np.maximum(bboxes1[..., 0], bboxes2[..., 0]) + yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) + xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) + yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) + w = np.maximum(0.0, xx2 - xx1) + h = np.maximum(0.0, yy2 - yy1) + wh = w * h + iou = wh / ( + (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) + - wh + ) + + centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 + centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 + centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 + centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 + + inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 + + xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) + yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) + xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) + yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) + + outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2 + diou = iou - inner_diag / outer_diag + + return (diou + 1) / 2.0 # resize from (-1,1) to (0,1) + + +def ciou_batch(bboxes1, bboxes2): + """ + :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) + :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) + :return: + """ + # for details should go to https://arxiv.org/pdf/1902.09630.pdf + # ensure predict's bbox form + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + # calculate the intersection box + xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) + yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) + xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) + yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) + w = np.maximum(0.0, xx2 - xx1) + h = np.maximum(0.0, yy2 - yy1) + wh = w * h + iou = wh / ( + (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) + - wh + ) + + centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 + centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 + centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 + centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 + + inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 + + xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) + yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) + xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) + yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) + + outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2 + + w1 = bboxes1[..., 2] - bboxes1[..., 0] + h1 = bboxes1[..., 3] - bboxes1[..., 1] + w2 = bboxes2[..., 2] - bboxes2[..., 0] + h2 = bboxes2[..., 3] - bboxes2[..., 1] + + # prevent dividing over zero. add one pixel shift + h2 = h2 + 1.0 + h1 = h1 + 1.0 + arctan = np.arctan(w2 / h2) - np.arctan(w1 / h1) + v = (4 / (np.pi**2)) * (arctan**2) + S = 1 - iou + alpha = v / (S + v) + ciou = iou - inner_diag / outer_diag - alpha * v + + return (ciou + 1) / 2.0 # resize from (-1,1) to (0,1) + + +def ct_dist(bboxes1, bboxes2): + """ + Measure the center distance between two sets of bounding boxes, + this is a coarse implementation, we don't recommend using it only + for association, which can be unstable and sensitive to frame rate + and object speed. 
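+    The pairwise distances are divided by the batch maximum and inverted,
+    so the returned values lie in [0, 1] with larger values indicating
+    closer centers (a similarity rather than a raw distance).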
+ """ + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 + centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 + centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 + centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 + + ct_dist2 = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 + + ct_dist = np.sqrt(ct_dist2) + + # The linear rescaling is a naive version and needs more study + ct_dist = ct_dist / ct_dist.max() + return ct_dist.max() - ct_dist # resize to (0,1) + + +def speed_direction_batch(dets, tracks): + tracks = tracks[..., np.newaxis] + CX1, CY1 = (dets[:, 0] + dets[:, 2]) / 2.0, (dets[:, 1] + dets[:, 3]) / 2.0 + CX2, CY2 = (tracks[:, 0] + tracks[:, 2]) / 2.0, (tracks[:, 1] + tracks[:, 3]) / 2.0 + dx = CX1 - CX2 + dy = CY1 - CY2 + norm = np.sqrt(dx**2 + dy**2) + 1e-6 + dx = dx / norm + dy = dy / norm + return dy, dx # size: num_track x num_det + + +def linear_assignment(cost_matrix): + try: + import lap + + _, x, y = lap.lapjv(cost_matrix, extend_cost=True) + return np.array([[y[i], i] for i in x if i >= 0]) # + except ImportError: + from scipy.optimize import linear_sum_assignment + + x, y = linear_sum_assignment(cost_matrix) + return np.array(list(zip(x, y))) + + +def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3): + """ + Assigns detections to tracked object (both represented as bounding boxes) + Returns 3 lists of matches, unmatched_detections and unmatched_trackers + """ + if len(trackers) == 0: + return ( + np.empty((0, 2), dtype=int), + np.arange(len(detections)), + np.empty((0, 5), dtype=int), + ) + + iou_matrix = iou_batch(detections, trackers) + + if min(iou_matrix.shape) > 0: + a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() == 1: + matched_indices = np.stack(np.where(a), axis=1) + else: + matched_indices = linear_assignment(-iou_matrix) + else: + matched_indices = np.empty(shape=(0, 2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if d not in matched_indices[:, 0]: + unmatched_detections.append(d) + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if t not in matched_indices[:, 1]: + unmatched_trackers.append(t) + + # filter out matched with low IOU + matches = [] + for m in matched_indices: + if iou_matrix[m[0], m[1]] < iou_threshold: + unmatched_detections.append(m[0]) + unmatched_trackers.append(m[1]) + else: + matches.append(m.reshape(1, 2)) + if len(matches) == 0: + matches = np.empty((0, 2), dtype=int) + else: + matches = np.concatenate(matches, axis=0) + + return matches, np.array(unmatched_detections), np.array(unmatched_trackers) + + +def compute_aw_max_metric(emb_cost, w_association_emb, bottom=0.5): + w_emb = np.full_like(emb_cost, w_association_emb) + + for idx in range(emb_cost.shape[0]): + inds = np.argsort(-emb_cost[idx]) + # If there's less than two matches, just keep original weight + if len(inds) < 2: + continue + if emb_cost[idx, inds[0]] == 0: + row_weight = 0 + else: + row_weight = 1 - max((emb_cost[idx, inds[1]] / emb_cost[idx, inds[0]]) - bottom, 0) / (1 - bottom) + w_emb[idx] *= row_weight + + for idj in range(emb_cost.shape[1]): + inds = np.argsort(-emb_cost[:, idj]) + # If there's less than two matches, just keep original weight + if len(inds) < 2: + continue + if emb_cost[inds[0], idj] == 0: + col_weight = 0 + else: + col_weight = 1 - max((emb_cost[inds[1], idj] / emb_cost[inds[0], idj]) - bottom, 0) / (1 - bottom) + w_emb[:, idj] *= 
col_weight + + return w_emb * emb_cost + + +def associate( + detections, trackers, iou_threshold, velocities, previous_obs, vdc_weight, emb_cost, w_assoc_emb, aw_off, aw_param +): + if len(trackers) == 0: + return ( + np.empty((0, 2), dtype=int), + np.arange(len(detections)), + np.empty((0, 5), dtype=int), + ) + + Y, X = speed_direction_batch(detections, previous_obs) + inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1] + inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1) + inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1) + diff_angle_cos = inertia_X * X + inertia_Y * Y + diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1) + diff_angle = np.arccos(diff_angle_cos) + diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi + + valid_mask = np.ones(previous_obs.shape[0]) + valid_mask[np.where(previous_obs[:, 4] < 0)] = 0 + + iou_matrix = iou_batch(detections, trackers) + scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1) + # iou_matrix = iou_matrix * scores # a trick sometiems works, we don't encourage this + valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1) + + angle_diff_cost = (valid_mask * diff_angle) * vdc_weight + angle_diff_cost = angle_diff_cost.T + angle_diff_cost = angle_diff_cost * scores + + if min(iou_matrix.shape) > 0: + a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() == 1: + matched_indices = np.stack(np.where(a), axis=1) + else: + if emb_cost is None: + emb_cost = 0 + else: + emb_cost = emb_cost.cpu().numpy() + emb_cost[iou_matrix <= 0] = 0 + if not aw_off: + emb_cost = compute_aw_max_metric(emb_cost, w_assoc_emb, bottom=aw_param) + else: + emb_cost *= w_assoc_emb + + final_cost = -(iou_matrix + angle_diff_cost + emb_cost) + matched_indices = linear_assignment(final_cost) + else: + matched_indices = np.empty(shape=(0, 2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if d not in matched_indices[:, 0]: + unmatched_detections.append(d) + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if t not in matched_indices[:, 1]: + unmatched_trackers.append(t) + + # filter out matched with low IOU + matches = [] + for m in matched_indices: + if iou_matrix[m[0], m[1]] < iou_threshold: + unmatched_detections.append(m[0]) + unmatched_trackers.append(m[1]) + else: + matches.append(m.reshape(1, 2)) + if len(matches) == 0: + matches = np.empty((0, 2), dtype=int) + else: + matches = np.concatenate(matches, axis=0) + + return matches, np.array(unmatched_detections), np.array(unmatched_trackers) + + +def associate_kitti(detections, trackers, det_cates, iou_threshold, velocities, previous_obs, vdc_weight): + if len(trackers) == 0: + return ( + np.empty((0, 2), dtype=int), + np.arange(len(detections)), + np.empty((0, 5), dtype=int), + ) + + """ + Cost from the velocity direction consistency + """ + Y, X = speed_direction_batch(detections, previous_obs) + inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1] + inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1) + inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1) + diff_angle_cos = inertia_X * X + inertia_Y * Y + diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1) + diff_angle = np.arccos(diff_angle_cos) + diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi + + valid_mask = np.ones(previous_obs.shape[0]) + valid_mask[np.where(previous_obs[:, 4] < 0)] = 0 + valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], 
axis=1) + + scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1) + angle_diff_cost = (valid_mask * diff_angle) * vdc_weight + angle_diff_cost = angle_diff_cost.T + angle_diff_cost = angle_diff_cost * scores + + """ + Cost from IoU + """ + iou_matrix = iou_batch(detections, trackers) + + """ + With multiple categories, generate the cost for catgory mismatch + """ + num_dets = detections.shape[0] + num_trk = trackers.shape[0] + cate_matrix = np.zeros((num_dets, num_trk)) + for i in range(num_dets): + for j in range(num_trk): + if det_cates[i] != trackers[j, 4]: + cate_matrix[i][j] = -1e6 + + cost_matrix = -iou_matrix - angle_diff_cost - cate_matrix + + if min(iou_matrix.shape) > 0: + a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() == 1: + matched_indices = np.stack(np.where(a), axis=1) + else: + matched_indices = linear_assignment(cost_matrix) + else: + matched_indices = np.empty(shape=(0, 2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if d not in matched_indices[:, 0]: + unmatched_detections.append(d) + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if t not in matched_indices[:, 1]: + unmatched_trackers.append(t) + + # filter out matched with low IOU + matches = [] + for m in matched_indices: + if iou_matrix[m[0], m[1]] < iou_threshold: + unmatched_detections.append(m[0]) + unmatched_trackers.append(m[1]) + else: + matches.append(m.reshape(1, 2)) + if len(matches) == 0: + matches = np.empty((0, 2), dtype=int) + else: + matches = np.concatenate(matches, axis=0) + + return matches, np.array(unmatched_detections), np.array(unmatched_trackers) diff --git a/feeder/trackers/deepocsort/cmc.py b/feeder/trackers/deepocsort/cmc.py new file mode 100644 index 0000000..13d771f --- /dev/null +++ b/feeder/trackers/deepocsort/cmc.py @@ -0,0 +1,170 @@ +import pdb +import pickle +import os + +import cv2 +import numpy as np + + +class CMCComputer: + def __init__(self, minimum_features=10, method="sparse"): + assert method in ["file", "sparse", "sift"] + + os.makedirs("./cache", exist_ok=True) + self.cache_path = "./cache/affine_ocsort.pkl" + self.cache = {} + if os.path.exists(self.cache_path): + with open(self.cache_path, "rb") as fp: + self.cache = pickle.load(fp) + self.minimum_features = minimum_features + self.prev_img = None + self.prev_desc = None + self.sparse_flow_param = dict( + maxCorners=3000, + qualityLevel=0.01, + minDistance=1, + blockSize=3, + useHarrisDetector=False, + k=0.04, + ) + self.file_computed = {} + + self.comp_function = None + if method == "sparse": + self.comp_function = self._affine_sparse_flow + elif method == "sift": + self.comp_function = self._affine_sift + # Same BoT-SORT CMC arrays + elif method == "file": + self.comp_function = self._affine_file + self.file_affines = {} + # Maps from tag name to file name + self.file_names = {} + + # All the ablation file names + for f_name in os.listdir("./cache/cmc_files/MOT17_ablation/"): + # The tag that'll be passed into compute_affine based on image name + tag = f_name.replace("GMC-", "").replace(".txt", "") + "-FRCNN" + f_name = os.path.join("./cache/cmc_files/MOT17_ablation/", f_name) + self.file_names[tag] = f_name + for f_name in os.listdir("./cache/cmc_files/MOT20_ablation/"): + tag = f_name.replace("GMC-", "").replace(".txt", "") + f_name = os.path.join("./cache/cmc_files/MOT20_ablation/", f_name) + self.file_names[tag] = f_name + + # All the test file names + for f_name in 
os.listdir("./cache/cmc_files/MOTChallenge/"): + tag = f_name.replace("GMC-", "").replace(".txt", "") + if "MOT17" in tag: + tag = tag + "-FRCNN" + # If it's an ablation one (not test) don't overwrite it + if tag in self.file_names: + continue + f_name = os.path.join("./cache/cmc_files/MOTChallenge/", f_name) + self.file_names[tag] = f_name + + def compute_affine(self, img, bbox, tag): + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + if tag in self.cache: + A = self.cache[tag] + return A + mask = np.ones_like(img, dtype=np.uint8) + if bbox.shape[0] > 0: + bbox = np.round(bbox).astype(np.int32) + bbox[bbox < 0] = 0 + for bb in bbox: + mask[bb[1] : bb[3], bb[0] : bb[2]] = 0 + + A = self.comp_function(img, mask, tag) + self.cache[tag] = A + + return A + + def _load_file(self, name): + affines = [] + with open(self.file_names[name], "r") as fp: + for line in fp: + tokens = [float(f) for f in line.split("\t")[1:7]] + A = np.eye(2, 3) + A[0, 0] = tokens[0] + A[0, 1] = tokens[1] + A[0, 2] = tokens[2] + A[1, 0] = tokens[3] + A[1, 1] = tokens[4] + A[1, 2] = tokens[5] + affines.append(A) + self.file_affines[name] = affines + + def _affine_file(self, frame, mask, tag): + name, num = tag.split(":") + if name not in self.file_affines: + self._load_file(name) + if name not in self.file_affines: + raise RuntimeError("Error loading file affines for CMC.") + + return self.file_affines[name][int(num) - 1] + + def _affine_sift(self, frame, mask, tag): + A = np.eye(2, 3) + detector = cv2.SIFT_create() + kp, desc = detector.detectAndCompute(frame, mask) + if self.prev_desc is None: + self.prev_desc = [kp, desc] + return A + if desc.shape[0] < self.minimum_features or self.prev_desc[1].shape[0] < self.minimum_features: + return A + + bf = cv2.BFMatcher(cv2.NORM_L2) + matches = bf.knnMatch(self.prev_desc[1], desc, k=2) + good = [] + for m, n in matches: + if m.distance < 0.7 * n.distance: + good.append(m) + + if len(good) > self.minimum_features: + src_pts = np.float32([self.prev_desc[0][m.queryIdx].pt for m in good]).reshape(-1, 1, 2) + dst_pts = np.float32([kp[m.trainIdx].pt for m in good]).reshape(-1, 1, 2) + A, _ = cv2.estimateAffinePartial2D(src_pts, dst_pts, method=cv2.RANSAC) + else: + print("Warning: not enough matching points") + if A is None: + A = np.eye(2, 3) + + self.prev_desc = [kp, desc] + return A + + def _affine_sparse_flow(self, frame, mask, tag): + # Initialize + A = np.eye(2, 3) + + # find the keypoints + keypoints = cv2.goodFeaturesToTrack(frame, mask=mask, **self.sparse_flow_param) + + # Handle first frame + if self.prev_img is None: + self.prev_img = frame + self.prev_desc = keypoints + return A + + matched_kp, status, err = cv2.calcOpticalFlowPyrLK(self.prev_img, frame, self.prev_desc, None) + matched_kp = matched_kp.reshape(-1, 2) + status = status.reshape(-1) + prev_points = self.prev_desc.reshape(-1, 2) + prev_points = prev_points[status] + curr_points = matched_kp[status] + + # Find rigid matrix + if prev_points.shape[0] > self.minimum_features: + A, _ = cv2.estimateAffinePartial2D(prev_points, curr_points, method=cv2.RANSAC) + else: + print("Warning: not enough matching points") + if A is None: + A = np.eye(2, 3) + + self.prev_img = frame + self.prev_desc = keypoints + return A + + def dump_cache(self): + with open(self.cache_path, "wb") as fp: + pickle.dump(self.cache, fp) diff --git a/feeder/trackers/deepocsort/configs/deepocsort.yaml b/feeder/trackers/deepocsort/configs/deepocsort.yaml new file mode 100644 index 0000000..dfa34fa --- /dev/null +++ 
b/feeder/trackers/deepocsort/configs/deepocsort.yaml @@ -0,0 +1,12 @@ +# Trial number: 137 +# HOTA, MOTA, IDF1: [55.567] +deepocsort: + asso_func: giou + conf_thres: 0.5122620708221085 + delta_t: 1 + det_thresh: 0 + inertia: 0.3941737016672115 + iou_thresh: 0.22136877277096445 + max_age: 50 + min_hits: 1 + use_byte: false diff --git a/feeder/trackers/deepocsort/embedding.py b/feeder/trackers/deepocsort/embedding.py new file mode 100644 index 0000000..bbef156 --- /dev/null +++ b/feeder/trackers/deepocsort/embedding.py @@ -0,0 +1,116 @@ +import pdb +from collections import OrderedDict +import os +import pickle + +import torch +import cv2 +import torchvision +import numpy as np + + + +class EmbeddingComputer: + def __init__(self, dataset): + self.model = None + self.dataset = dataset + self.crop_size = (128, 384) + os.makedirs("./cache/embeddings/", exist_ok=True) + self.cache_path = "./cache/embeddings/{}_embedding.pkl" + self.cache = {} + self.cache_name = "" + + def load_cache(self, path): + self.cache_name = path + cache_path = self.cache_path.format(path) + if os.path.exists(cache_path): + with open(cache_path, "rb") as fp: + self.cache = pickle.load(fp) + + def compute_embedding(self, img, bbox, tag, is_numpy=True): + if self.cache_name != tag.split(":")[0]: + self.load_cache(tag.split(":")[0]) + + if tag in self.cache: + embs = self.cache[tag] + if embs.shape[0] != bbox.shape[0]: + raise RuntimeError( + "ERROR: The number of cached embeddings don't match the " + "number of detections.\nWas the detector model changed? Delete cache if so." + ) + return embs + + if self.model is None: + self.initialize_model() + + # Make sure bbox is within image frame + if is_numpy: + h, w = img.shape[:2] + else: + h, w = img.shape[2:] + results = np.round(bbox).astype(np.int32) + results[:, 0] = results[:, 0].clip(0, w) + results[:, 1] = results[:, 1].clip(0, h) + results[:, 2] = results[:, 2].clip(0, w) + results[:, 3] = results[:, 3].clip(0, h) + + # Generate all the crops + crops = [] + for p in results: + if is_numpy: + crop = img[p[1] : p[3], p[0] : p[2]] + crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB) + crop = cv2.resize(crop, self.crop_size, interpolation=cv2.INTER_LINEAR) + crop = torch.as_tensor(crop.astype("float32").transpose(2, 0, 1)) + crop = crop.unsqueeze(0) + else: + crop = img[:, :, p[1] : p[3], p[0] : p[2]] + crop = torchvision.transforms.functional.resize(crop, self.crop_size) + + crops.append(crop) + + crops = torch.cat(crops, dim=0) + + # Create embeddings and l2 normalize them + with torch.no_grad(): + crops = crops.cuda() + crops = crops.half() + embs = self.model(crops) + embs = torch.nn.functional.normalize(embs) + embs = embs.cpu().numpy() + + self.cache[tag] = embs + return embs + + def initialize_model(self): + """ + model = torchreid.models.build_model(name="osnet_ain_x1_0", num_classes=2510, loss="softmax", pretrained=False) + sd = torch.load("external/weights/osnet_ain_ms_d_c.pth.tar")["state_dict"] + new_state_dict = OrderedDict() + for k, v in sd.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + # load params + model.load_state_dict(new_state_dict) + model.eval() + model.cuda() + """ + if self.dataset == "mot17": + path = "external/weights/mot17_sbs_S50.pth" + elif self.dataset == "mot20": + path = "external/weights/mot20_sbs_S50.pth" + elif self.dataset == "dance": + path = None + else: + raise RuntimeError("Need the path for a new ReID model.") + + model = FastReID(path) + model.eval() + model.cuda() + model.half() + self.model = model + + def 
dump_cache(self): + if self.cache_name: + with open(self.cache_path.format(self.cache_name), "wb") as fp: + pickle.dump(self.cache, fp) diff --git a/feeder/trackers/deepocsort/kalmanfilter.py b/feeder/trackers/deepocsort/kalmanfilter.py new file mode 100644 index 0000000..19e0427 --- /dev/null +++ b/feeder/trackers/deepocsort/kalmanfilter.py @@ -0,0 +1,1636 @@ +# -*- coding: utf-8 -*- +# pylint: disable=invalid-name, too-many-arguments, too-many-branches, +# pylint: disable=too-many-locals, too-many-instance-attributes, too-many-lines + +""" +This module implements the linear Kalman filter in both an object +oriented and procedural form. The KalmanFilter class implements +the filter by storing the various matrices in instance variables, +minimizing the amount of bookkeeping you have to do. +All Kalman filters operate with a predict->update cycle. The +predict step, implemented with the method or function predict(), +uses the state transition matrix F to predict the state in the next +time period (epoch). The state is stored as a gaussian (x, P), where +x is the state (column) vector, and P is its covariance. Covariance +matrix Q specifies the process covariance. In Bayesian terms, this +prediction is called the *prior*, which you can think of colloquially +as the estimate prior to incorporating the measurement. +The update step, implemented with the method or function `update()`, +incorporates the measurement z with covariance R, into the state +estimate (x, P). The class stores the system uncertainty in S, +the innovation (residual between prediction and measurement in +measurement space) in y, and the Kalman gain in k. The procedural +form returns these variables to you. In Bayesian terms this computes +the *posterior* - the estimate after the information from the +measurement is incorporated. +Whether you use the OO form or procedural form is up to you. If +matrices such as H, R, and F are changing each epoch, you'll probably +opt to use the procedural form. If they are unchanging, the OO +form is perhaps easier to use since you won't need to keep track +of these matrices. This is especially useful if you are implementing +banks of filters or comparing various KF designs for performance; +a trivial coding bug could lead to using the wrong sets of matrices. +This module also offers an implementation of the RTS smoother, and +other helper functions, such as log likelihood computations. +The Saver class allows you to easily save the state of the +KalmanFilter class after every update +This module expects NumPy arrays for all values that expect +arrays, although in a few cases, particularly method parameters, +it will accept types that convert to NumPy arrays, such as lists +of lists. These exceptions are documented in the method or function. +Examples +-------- +The following example constructs a constant velocity kinematic +filter, filters noisy data, and plots the results. It also demonstrates +using the Saver class to save the state of the filter at each epoch. +.. 
code-block:: Python
+    import matplotlib.pyplot as plt
+    import numpy as np
+    from numpy.random import randn
+    from filterpy.kalman import KalmanFilter, predict, update
+    from filterpy.common import Q_discrete_white_noise, Saver
+    dt = 1.
+    r_std, q_std = 2., 0.003
+    cv = KalmanFilter(dim_x=2, dim_z=1)
+    cv.x = np.array([[0.], [1.]]) # position, velocity
+    cv.F = np.array([[1., dt], [0., 1.]])
+    cv.R = np.array([[r_std**2]])
+    cv.H = np.array([[1., 0.]])
+    cv.P = np.diag([.1**2, .03**2])
+    cv.Q = Q_discrete_white_noise(2, dt, q_std**2)
+    saver = Saver(cv)
+    for z in range(100):
+        cv.predict()
+        cv.update([z + randn() * r_std])
+        saver.save() # save the filter's state
+    saver.to_array()
+    plt.plot(saver.x[:, 0])
+    # plot all of the priors
+    plt.plot(saver.x_prior[:, 0])
+    # plot mahalanobis distance
+    plt.figure()
+    plt.plot(saver.mahalanobis)
+This code implements the same filter using the procedural form
+    x = np.array([[0.], [1.]]) # position, velocity
+    F = np.array([[1., dt], [0., 1.]])
+    R = np.array([[r_std**2]])
+    H = np.array([[1., 0.]])
+    P = np.diag([.1**2, .03**2])
+    Q = Q_discrete_white_noise(2, dt, q_std**2)
+    xs = []
+    for z in range(100):
+        x, P = predict(x, P, F=F, Q=Q)
+        x, P = update(x, P, z=[z + randn() * r_std], R=R, H=H)
+        xs.append(x[0, 0])
+    plt.plot(xs)
+For more examples see the test subdirectory, or refer to the
+book cited below. In it I both teach Kalman filtering from basic
+principles, and teach the use of this library in great detail.
+FilterPy library.
+http://github.com/rlabbe/filterpy
+Documentation at:
+https://filterpy.readthedocs.org
+Supporting book at:
+https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
+This is licensed under an MIT license. See the readme.MD file
+for more information.
+Copyright 2014-2018 Roger R Labbe Jr.
+"""
+
+from __future__ import absolute_import, division
+
+import pdb
+from copy import deepcopy
+from math import log, exp, sqrt
+import sys
+import numpy as np
+from numpy import dot, zeros, eye, isscalar, shape
+import numpy.linalg as linalg
+from filterpy.stats import logpdf
+from filterpy.common import pretty_str, reshape_z
+
+
+class KalmanFilterNew(object):
+    """Implements a Kalman filter. You are responsible for setting the
+    various state variables to reasonable values; the defaults will
+    not give you a functional filter.
+    For now the best documentation is my free book Kalman and Bayesian
+    Filters in Python [2]_. The test files in this directory also give you a
+    basic idea of use, albeit without much description.
+    In brief, you will first construct this object, specifying the size of
+    the state vector with dim_x and the size of the measurement vector that
+    you will be using with dim_z. These are mostly used to perform size checks
+    when you assign values to the various matrices. For example, if you
+    specified dim_z=2 and then try to assign a 3x3 matrix to R (the
+    measurement noise matrix) you will get an assert exception because R
+    should be 2x2. (If for whatever reason you need to alter the size of
+    things midstream just use the underscore version of the matrices to
+    assign directly: your_filter._R = a_3x3_matrix.)
+    After construction the filter will have default matrices created for you,
+    but you must specify the values for each. It's usually easiest to just
+    overwrite them rather than assign to each element yourself. This will be
+    clearer in the example below. All are of type numpy.array.
+    Examples
+    --------
+    Here is a filter that tracks position and velocity using a sensor that only
+    reads position.
+    First construct the object with the required dimensionality.
Here the state + (`dim_x`) has 2 coefficients (position and velocity), and the measurement + (`dim_z`) has one. In FilterPy `x` is the state, `z` is the measurement. + .. code:: + from filterpy.kalman import KalmanFilter + f = KalmanFilter (dim_x=2, dim_z=1) + Assign the initial value for the state (position and velocity). You can do this + with a two dimensional array like so: + .. code:: + f.x = np.array([[2.], # position + [0.]]) # velocity + or just use a one dimensional array, which I prefer doing. + .. code:: + f.x = np.array([2., 0.]) + Define the state transition matrix: + .. code:: + f.F = np.array([[1.,1.], + [0.,1.]]) + Define the measurement function. Here we need to convert a position-velocity + vector into just a position vector, so we use: + .. code:: + f.H = np.array([[1., 0.]]) + Define the state's covariance matrix P. + .. code:: + f.P = np.array([[1000., 0.], + [ 0., 1000.] ]) + Now assign the measurement noise. Here the dimension is 1x1, so I can + use a scalar + .. code:: + f.R = 5 + I could have done this instead: + .. code:: + f.R = np.array([[5.]]) + Note that this must be a 2 dimensional array. + Finally, I will assign the process noise. Here I will take advantage of + another FilterPy library function: + .. code:: + from filterpy.common import Q_discrete_white_noise + f.Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13) + Now just perform the standard predict/update loop: + .. code:: + while some_condition_is_true: + z = get_sensor_reading() + f.predict() + f.update(z) + do_something_with_estimate (f.x) + **Procedural Form** + This module also contains stand alone functions to perform Kalman filtering. + Use these if you are not a fan of objects. + **Example** + .. code:: + while True: + z, R = read_sensor() + x, P = predict(x, P, F, Q) + x, P = update(x, P, z, R, H) + See my book Kalman and Bayesian Filters in Python [2]_. + You will have to set the following attributes after constructing this + object for the filter to perform properly. Please note that there are + various checks in place to ensure that you have made everything the + 'correct' size. However, it is possible to provide incorrectly sized + arrays such that the linear algebra can not perform an operation. + It can also fail silently - you can end up with matrices of a size that + allows the linear algebra to work, but are the wrong shape for the problem + you are trying to solve. + Parameters + ---------- + dim_x : int + Number of state variables for the Kalman filter. For example, if + you are tracking the position and velocity of an object in two + dimensions, dim_x would be 4. + This is used to set the default size of P, Q, and u + dim_z : int + Number of of measurement inputs. For example, if the sensor + provides you with position in (x,y), dim_z would be 2. + dim_u : int (optional) + size of the control input, if it is being used. + Default value of 0 indicates it is not used. + compute_log_likelihood : bool (default = True) + Computes log likelihood by default, but this can be a slow + computation, so if you never use it you can turn this computation + off. + Attributes + ---------- + x : numpy.array(dim_x, 1) + Current state estimate. Any call to update() or predict() updates + this variable. + P : numpy.array(dim_x, dim_x) + Current state covariance matrix. Any call to update() or predict() + updates this variable. + x_prior : numpy.array(dim_x, 1) + Prior (predicted) state estimate. 
The *_prior and *_post attributes + are for convenience; they store the prior and posterior of the + current epoch. Read Only. + P_prior : numpy.array(dim_x, dim_x) + Prior (predicted) state covariance matrix. Read Only. + x_post : numpy.array(dim_x, 1) + Posterior (updated) state estimate. Read Only. + P_post : numpy.array(dim_x, dim_x) + Posterior (updated) state covariance matrix. Read Only. + z : numpy.array + Last measurement used in update(). Read only. + R : numpy.array(dim_z, dim_z) + Measurement noise covariance matrix. Also known as the + observation covariance. + Q : numpy.array(dim_x, dim_x) + Process noise covariance matrix. Also known as the transition + covariance. + F : numpy.array() + State Transition matrix. Also known as `A` in some formulation. + H : numpy.array(dim_z, dim_x) + Measurement function. Also known as the observation matrix, or as `C`. + y : numpy.array + Residual of the update step. Read only. + K : numpy.array(dim_x, dim_z) + Kalman gain of the update step. Read only. + S : numpy.array + System uncertainty (P projected to measurement space). Read only. + SI : numpy.array + Inverse system uncertainty. Read only. + log_likelihood : float + log-likelihood of the last measurement. Read only. + likelihood : float + likelihood of last measurement. Read only. + Computed from the log-likelihood. The log-likelihood can be very + small, meaning a large negative value such as -28000. Taking the + exp() of that results in 0.0, which can break typical algorithms + which multiply by this value, so by default we always return a + number >= sys.float_info.min. + mahalanobis : float + mahalanobis distance of the innovation. Read only. + inv : function, default numpy.linalg.inv + If you prefer another inverse function, such as the Moore-Penrose + pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv + This is only used to invert self.S. If you know it is diagonal, you + might choose to set it to filterpy.common.inv_diagonal, which is + several times faster than numpy.linalg.inv for diagonal matrices. + alpha : float + Fading memory setting. 1.0 gives the normal Kalman filter, and + values slightly larger than 1.0 (such as 1.02) give a fading + memory effect - previous measurements have less influence on the + filter's estimates. This formulation of the Fading memory filter + (there are many) is due to Dan Simon [1]_. + References + ---------- + .. [1] Dan Simon. "Optimal State Estimation." John Wiley & Sons. + p. 208-212. (2006) + .. [2] Roger Labbe. "Kalman and Bayesian Filters in Python" + https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python + """ + + def __init__(self, dim_x, dim_z, dim_u=0): + if dim_x < 1: + raise ValueError("dim_x must be 1 or greater") + if dim_z < 1: + raise ValueError("dim_z must be 1 or greater") + if dim_u < 0: + raise ValueError("dim_u must be 0 or greater") + + self.dim_x = dim_x + self.dim_z = dim_z + self.dim_u = dim_u + + self.x = zeros((dim_x, 1)) # state + self.P = eye(dim_x) # uncertainty covariance + self.Q = eye(dim_x) # process uncertainty + self.B = None # control transition matrix + self.F = eye(dim_x) # state transition matrix + self.H = zeros((dim_z, dim_x)) # measurement function + self.R = eye(dim_z) # measurement uncertainty + self._alpha_sq = 1.0 # fading memory control + self.M = np.zeros((dim_x, dim_z)) # process-measurement cross correlation + self.z = np.array([[None] * self.dim_z]).T + + # gain and residual are computed during the innovation step. 
We + # save them so that in case you want to inspect them for various + # purposes + self.K = np.zeros((dim_x, dim_z)) # kalman gain + self.y = zeros((dim_z, 1)) + self.S = np.zeros((dim_z, dim_z)) # system uncertainty + self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty + + # identity matrix. Do not alter this. + self._I = np.eye(dim_x) + + # these will always be a copy of x,P after predict() is called + self.x_prior = self.x.copy() + self.P_prior = self.P.copy() + + # these will always be a copy of x,P after update() is called + self.x_post = self.x.copy() + self.P_post = self.P.copy() + + # Only computed only if requested via property + self._log_likelihood = log(sys.float_info.min) + self._likelihood = sys.float_info.min + self._mahalanobis = None + + # keep all observations + self.history_obs = [] + + self.inv = np.linalg.inv + + self.attr_saved = None + self.observed = False + self.last_measurement = None + + def predict(self, u=None, B=None, F=None, Q=None): + """ + Predict next state (prior) using the Kalman filter state propagation + equations. + Parameters + ---------- + u : np.array, default 0 + Optional control vector. + B : np.array(dim_x, dim_u), or None + Optional control transition matrix; a value of None + will cause the filter to use `self.B`. + F : np.array(dim_x, dim_x), or None + Optional state transition matrix; a value of None + will cause the filter to use `self.F`. + Q : np.array(dim_x, dim_x), scalar, or None + Optional process noise matrix; a value of None will cause the + filter to use `self.Q`. + """ + + if B is None: + B = self.B + if F is None: + F = self.F + if Q is None: + Q = self.Q + elif isscalar(Q): + Q = eye(self.dim_x) * Q + + # x = Fx + Bu + if B is not None and u is not None: + self.x = dot(F, self.x) + dot(B, u) + else: + self.x = dot(F, self.x) + + # P = FPF' + Q + self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q + + # save prior + self.x_prior = self.x.copy() + self.P_prior = self.P.copy() + + def freeze(self): + """ + Save the parameters before non-observation forward + """ + self.attr_saved = deepcopy(self.__dict__) + + def apply_affine_correction(self, m, t, new_kf): + """ + Apply to both last state and last observation for OOS smoothing. + + Messy due to internal logic for kalman filter being messy. 
+ """ + if new_kf: + big_m = np.kron(np.eye(4, dtype=float), m) + self.x = big_m @ self.x + self.x[:2] += t + self.P = big_m @ self.P @ big_m.T + + # If frozen, also need to update the frozen state for OOS + if not self.observed and self.attr_saved is not None: + self.attr_saved["x"] = big_m @ self.attr_saved["x"] + self.attr_saved["x"][:2] += t + self.attr_saved["P"] = big_m @ self.attr_saved["P"] @ big_m.T + self.attr_saved["last_measurement"][:2] = m @ self.attr_saved["last_measurement"][:2] + t + self.attr_saved["last_measurement"][2:] = m @ self.attr_saved["last_measurement"][2:] + else: + scale = np.linalg.norm(m[:, 0]) + self.x[:2] = m @ self.x[:2] + t + self.x[4:6] = m @ self.x[4:6] + # self.x[2] *= scale + # self.x[6] *= scale + + self.P[:2, :2] = m @ self.P[:2, :2] @ m.T + self.P[4:6, 4:6] = m @ self.P[4:6, 4:6] @ m.T + # self.P[2, 2] *= 2 * scale + # self.P[6, 6] *= 2 * scale + + # If frozen, also need to update the frozen state for OOS + if not self.observed and self.attr_saved is not None: + self.attr_saved["x"][:2] = m @ self.attr_saved["x"][:2] + t + self.attr_saved["x"][4:6] = m @ self.attr_saved["x"][4:6] + # self.attr_saved["x"][2] *= scale + # self.attr_saved["x"][6] *= scale + + self.attr_saved["P"][:2, :2] = m @ self.attr_saved["P"][:2, :2] @ m.T + self.attr_saved["P"][4:6, 4:6] = m @ self.attr_saved["P"][4:6, 4:6] @ m.T + # self.attr_saved["P"][2, 2] *= 2 * scale + # self.attr_saved["P"][6, 6] *= 2 * scale + + self.attr_saved["last_measurement"][:2] = m @ self.attr_saved["last_measurement"][:2] + t + # self.attr_saved["last_measurement"][2] *= scale + + def unfreeze(self): + if self.attr_saved is not None: + new_history = deepcopy(self.history_obs) + self.__dict__ = self.attr_saved + # self.history_obs = new_history + self.history_obs = self.history_obs[:-1] + occur = [int(d is None) for d in new_history] + indices = np.where(np.array(occur) == 0)[0] + index1 = indices[-2] + index2 = indices[-1] + # box1 = new_history[index1] + box1 = self.last_measurement + x1, y1, s1, r1 = box1 + w1 = np.sqrt(s1 * r1) + h1 = np.sqrt(s1 / r1) + box2 = new_history[index2] + x2, y2, s2, r2 = box2 + w2 = np.sqrt(s2 * r2) + h2 = np.sqrt(s2 / r2) + time_gap = index2 - index1 + dx = (x2 - x1) / time_gap + dy = (y2 - y1) / time_gap + dw = (w2 - w1) / time_gap + dh = (h2 - h1) / time_gap + for i in range(index2 - index1): + """ + The default virtual trajectory generation is by linear + motion (constant speed hypothesis), you could modify this + part to implement your own. + """ + x = x1 + (i + 1) * dx + y = y1 + (i + 1) * dy + w = w1 + (i + 1) * dw + h = h1 + (i + 1) * dh + s = w * h + r = w / float(h) + new_box = np.array([x, y, s, r]).reshape((4, 1)) + """ + I still use predict-update loop here to refresh the parameters, + but this can be faster by directly modifying the internal parameters + as suggested in the paper. I keep this naive but slow way for + easy read and understanding + """ + self.update(new_box) + if not i == (index2 - index1 - 1): + self.predict() + + def update(self, z, R=None, H=None): + """ + Add a new measurement (z) to the Kalman filter. + If z is None, nothing is computed. However, x_post and P_post are + updated with the prior (x_prior, P_prior), and self.z is set to None. + Parameters + ---------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + If you pass in a value of H, z must be a column vector the + of the correct size. 
+ R : np.array, scalar, or None + Optionally provide R to override the measurement noise for this + one call, otherwise self.R will be used. + H : np.array, or None + Optionally provide H to override the measurement function for this + one call, otherwise self.H will be used. + """ + # set to None to force recompute + self._log_likelihood = None + self._likelihood = None + self._mahalanobis = None + + # append the observation + self.history_obs.append(z) + + if z is None: + if self.observed: + """ + Got no observation so freeze the current parameters for future + potential online smoothing. + """ + self.last_measurement = self.history_obs[-2] + self.freeze() + self.observed = False + self.z = np.array([[None] * self.dim_z]).T + self.x_post = self.x.copy() + self.P_post = self.P.copy() + self.y = zeros((self.dim_z, 1)) + return + + # self.observed = True + if not self.observed: + """ + Get observation, use online smoothing to re-update parameters + """ + self.unfreeze() + self.observed = True + + if R is None: + R = self.R + elif isscalar(R): + R = eye(self.dim_z) * R + + if H is None: + z = reshape_z(z, self.dim_z, self.x.ndim) + H = self.H + + # y = z - Hx + # error (residual) between measurement and prediction + self.y = z - dot(H, self.x) + + # common subexpression for speed + PHT = dot(self.P, H.T) + + # S = HPH' + R + # project system uncertainty into measurement space + self.S = dot(H, PHT) + R + self.SI = self.inv(self.S) + # K = PH'inv(S) + # map system uncertainty into kalman gain + self.K = dot(PHT, self.SI) + + # x = x + Ky + # predict new x with residual scaled by the kalman gain + self.x = self.x + dot(self.K, self.y) + + # P = (I-KH)P(I-KH)' + KRK' + # This is more numerically stable + # and works for non-optimal K vs the equation + # P = (I-KH)P usually seen in the literature. + + I_KH = self._I - dot(self.K, H) + self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T) + + # save measurement and posterior state + self.z = deepcopy(z) + self.x_post = self.x.copy() + self.P_post = self.P.copy() + + def md_for_measurement(self, z): + """Mahalanobis distance for any measurement. + + Should be run after a prediction() call. + """ + z = reshape_z(z, self.dim_z, self.x.ndim) + H = self.H + y = z - dot(H, self.x) + md = sqrt(float(dot(dot(y.T, self.SI), y))) + return md + + def predict_steadystate(self, u=0, B=None): + """ + Predict state (prior) using the Kalman filter state propagation + equations. Only x is updated, P is left unchanged. See + update_steadstate() for a longer explanation of when to use this + method. + Parameters + ---------- + u : np.array + Optional control vector. If non-zero, it is multiplied by B + to create the control input into the system. + B : np.array(dim_x, dim_u), or None + Optional control transition matrix; a value of None + will cause the filter to use `self.B`. + """ + + if B is None: + B = self.B + + # x = Fx + Bu + if B is not None: + self.x = dot(self.F, self.x) + dot(B, u) + else: + self.x = dot(self.F, self.x) + + # save prior + self.x_prior = self.x.copy() + self.P_prior = self.P.copy() + + def update_steadystate(self, z): + """ + Add a new measurement (z) to the Kalman filter without recomputing + the Kalman gain K, the state covariance P, or the system + uncertainty S. + You can use this for LTI systems since the Kalman gain and covariance + converge to a fixed value. Precompute these and assign them explicitly, + or run the Kalman filter using the normal predict()/update(0 cycle + until they converge. 
+ The main advantage of this call is speed. We do significantly less + computation, notably avoiding a costly matrix inversion. + Use in conjunction with predict_steadystate(), otherwise P will grow + without bound. + Parameters + ---------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + Examples + -------- + >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter + >>> # let filter converge on representative data, then save k and P + >>> for i in range(100): + >>> cv.predict() + >>> cv.update([i, i, i]) + >>> saved_k = np.copy(cv.K) + >>> saved_P = np.copy(cv.P) + later on: + >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter + >>> cv.K = np.copy(saved_K) + >>> cv.P = np.copy(saved_P) + >>> for i in range(100): + >>> cv.predict_steadystate() + >>> cv.update_steadystate([i, i, i]) + """ + + # set to None to force recompute + self._log_likelihood = None + self._likelihood = None + self._mahalanobis = None + + if z is None: + self.z = np.array([[None] * self.dim_z]).T + self.x_post = self.x.copy() + self.P_post = self.P.copy() + self.y = zeros((self.dim_z, 1)) + return + + z = reshape_z(z, self.dim_z, self.x.ndim) + + # y = z - Hx + # error (residual) between measurement and prediction + self.y = z - dot(self.H, self.x) + + # x = x + Ky + # predict new x with residual scaled by the kalman gain + self.x = self.x + dot(self.K, self.y) + + self.z = deepcopy(z) + self.x_post = self.x.copy() + self.P_post = self.P.copy() + + # set to None to force recompute + self._log_likelihood = None + self._likelihood = None + self._mahalanobis = None + + def update_correlated(self, z, R=None, H=None): + """Add a new measurement (z) to the Kalman filter assuming that + process noise and measurement noise are correlated as defined in + the `self.M` matrix. + A partial derivation can be found in [1] + If z is None, nothing is changed. + Parameters + ---------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + R : np.array, scalar, or None + Optionally provide R to override the measurement noise for this + one call, otherwise self.R will be used. + H : np.array, or None + Optionally provide H to override the measurement function for this + one call, otherwise self.H will be used. + References + ---------- + .. [1] Bulut, Y. (2011). Applied Kalman filter theory (Doctoral dissertation, Northeastern University). + http://people.duke.edu/~hpgavin/SystemID/References/Balut-KalmanFilter-PhD-NEU-2011.pdf + """ + + # set to None to force recompute + self._log_likelihood = None + self._likelihood = None + self._mahalanobis = None + + if z is None: + self.z = np.array([[None] * self.dim_z]).T + self.x_post = self.x.copy() + self.P_post = self.P.copy() + self.y = zeros((self.dim_z, 1)) + return + + if R is None: + R = self.R + elif isscalar(R): + R = eye(self.dim_z) * R + + # rename for readability and a tiny extra bit of speed + if H is None: + z = reshape_z(z, self.dim_z, self.x.ndim) + H = self.H + + # handle special case: if z is in form [[z]] but x is not a column + # vector dimensions will not match + if self.x.ndim == 1 and shape(z) == (1, 1): + z = z[0] + + if shape(z) == (): # is it scalar, e.g. 
z=3 or z=np.array(3) + z = np.asarray([z]) + + # y = z - Hx + # error (residual) between measurement and prediction + self.y = z - dot(H, self.x) + + # common subexpression for speed + PHT = dot(self.P, H.T) + + # project system uncertainty into measurement space + self.S = dot(H, PHT) + dot(H, self.M) + dot(self.M.T, H.T) + R + self.SI = self.inv(self.S) + + # K = PH'inv(S) + # map system uncertainty into kalman gain + self.K = dot(PHT + self.M, self.SI) + + # x = x + Ky + # predict new x with residual scaled by the kalman gain + self.x = self.x + dot(self.K, self.y) + self.P = self.P - dot(self.K, dot(H, self.P) + self.M.T) + + self.z = deepcopy(z) + self.x_post = self.x.copy() + self.P_post = self.P.copy() + + def batch_filter( + self, + zs, + Fs=None, + Qs=None, + Hs=None, + Rs=None, + Bs=None, + us=None, + update_first=False, + saver=None, + ): + """Batch processes a sequences of measurements. + Parameters + ---------- + zs : list-like + list of measurements at each time step `self.dt`. Missing + measurements must be represented by `None`. + Fs : None, list-like, default=None + optional value or list of values to use for the state transition + matrix F. + If Fs is None then self.F is used for all epochs. + Otherwise it must contain a list-like list of F's, one for + each epoch. This allows you to have varying F per epoch. + Qs : None, np.array or list-like, default=None + optional value or list of values to use for the process error + covariance Q. + If Qs is None then self.Q is used for all epochs. + Otherwise it must contain a list-like list of Q's, one for + each epoch. This allows you to have varying Q per epoch. + Hs : None, np.array or list-like, default=None + optional list of values to use for the measurement matrix H. + If Hs is None then self.H is used for all epochs. + If Hs contains a single matrix, then it is used as H for all + epochs. + Otherwise it must contain a list-like list of H's, one for + each epoch. This allows you to have varying H per epoch. + Rs : None, np.array or list-like, default=None + optional list of values to use for the measurement error + covariance R. + If Rs is None then self.R is used for all epochs. + Otherwise it must contain a list-like list of R's, one for + each epoch. This allows you to have varying R per epoch. + Bs : None, np.array or list-like, default=None + optional list of values to use for the control transition matrix B. + If Bs is None then self.B is used for all epochs. + Otherwise it must contain a list-like list of B's, one for + each epoch. This allows you to have varying B per epoch. + us : None, np.array or list-like, default=None + optional list of values to use for the control input vector; + If us is None then None is used for all epochs (equivalent to 0, + or no control input). + Otherwise it must contain a list-like list of u's, one for + each epoch. + update_first : bool, optional, default=False + controls whether the order of operations is update followed by + predict, or predict followed by update. Default is predict->update. + saver : filterpy.common.Saver, optional + filterpy.common.Saver object. If provided, saver.save() will be + called after every epoch + Returns + ------- + means : np.array((n,dim_x,1)) + array of the state for each time step after the update. Each entry + is an np.array. In other words `means[k,:]` is the state at step + `k`. + covariance : np.array((n,dim_x,dim_x)) + array of the covariances for each time step after the update. 
+ In other words `covariance[k,:,:]` is the covariance at step `k`. + means_predictions : np.array((n,dim_x,1)) + array of the state for each time step after the predictions. Each + entry is an np.array. In other words `means[k,:]` is the state at + step `k`. + covariance_predictions : np.array((n,dim_x,dim_x)) + array of the covariances for each time step after the prediction. + In other words `covariance[k,:,:]` is the covariance at step `k`. + Examples + -------- + .. code-block:: Python + # this example demonstrates tracking a measurement where the time + # between measurement varies, as stored in dts. This requires + # that F be recomputed for each epoch. The output is then smoothed + # with an RTS smoother. + zs = [t + random.randn()*4 for t in range (40)] + Fs = [np.array([[1., dt], [0, 1]] for dt in dts] + (mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs) + (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs) + """ + + # pylint: disable=too-many-statements + n = np.size(zs, 0) + if Fs is None: + Fs = [self.F] * n + if Qs is None: + Qs = [self.Q] * n + if Hs is None: + Hs = [self.H] * n + if Rs is None: + Rs = [self.R] * n + if Bs is None: + Bs = [self.B] * n + if us is None: + us = [0] * n + + # mean estimates from Kalman Filter + if self.x.ndim == 1: + means = zeros((n, self.dim_x)) + means_p = zeros((n, self.dim_x)) + else: + means = zeros((n, self.dim_x, 1)) + means_p = zeros((n, self.dim_x, 1)) + + # state covariances from Kalman Filter + covariances = zeros((n, self.dim_x, self.dim_x)) + covariances_p = zeros((n, self.dim_x, self.dim_x)) + + if update_first: + for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): + + self.update(z, R=R, H=H) + means[i, :] = self.x + covariances[i, :, :] = self.P + + self.predict(u=u, B=B, F=F, Q=Q) + means_p[i, :] = self.x + covariances_p[i, :, :] = self.P + + if saver is not None: + saver.save() + else: + for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): + + self.predict(u=u, B=B, F=F, Q=Q) + means_p[i, :] = self.x + covariances_p[i, :, :] = self.P + + self.update(z, R=R, H=H) + means[i, :] = self.x + covariances[i, :, :] = self.P + + if saver is not None: + saver.save() + + return (means, covariances, means_p, covariances_p) + + def rts_smoother(self, Xs, Ps, Fs=None, Qs=None, inv=np.linalg.inv): + """ + Runs the Rauch-Tung-Striebel Kalman smoother on a set of + means and covariances computed by a Kalman filter. The usual input + would come from the output of `KalmanFilter.batch_filter()`. + Parameters + ---------- + Xs : numpy.array + array of the means (state variable x) of the output of a Kalman + filter. + Ps : numpy.array + array of the covariances of the output of a kalman filter. + Fs : list-like collection of numpy.array, optional + State transition matrix of the Kalman filter at each time step. + Optional, if not provided the filter's self.F will be used + Qs : list-like collection of numpy.array, optional + Process noise of the Kalman filter at each time step. Optional, + if not provided the filter's self.Q will be used + inv : function, default numpy.linalg.inv + If you prefer another inverse function, such as the Moore-Penrose + pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv + Returns + ------- + x : numpy.ndarray + smoothed means + P : numpy.ndarray + smoothed state covariances + K : numpy.ndarray + smoother gain at each step + Pp : numpy.ndarray + Predicted state covariances + Examples + -------- + .. 
code-block:: Python + zs = [t + random.randn()*4 for t in range (40)] + (mu, cov, _, _) = kalman.batch_filter(zs) + (x, P, K, Pp) = rts_smoother(mu, cov, kf.F, kf.Q) + """ + + if len(Xs) != len(Ps): + raise ValueError("length of Xs and Ps must be the same") + + n = Xs.shape[0] + dim_x = Xs.shape[1] + + if Fs is None: + Fs = [self.F] * n + if Qs is None: + Qs = [self.Q] * n + + # smoother gain + K = zeros((n, dim_x, dim_x)) + + x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy() + for k in range(n - 2, -1, -1): + Pp[k] = dot(dot(Fs[k + 1], P[k]), Fs[k + 1].T) + Qs[k + 1] + + # pylint: disable=bad-whitespace + K[k] = dot(dot(P[k], Fs[k + 1].T), inv(Pp[k])) + x[k] += dot(K[k], x[k + 1] - dot(Fs[k + 1], x[k])) + P[k] += dot(dot(K[k], P[k + 1] - Pp[k]), K[k].T) + + return (x, P, K, Pp) + + def get_prediction(self, u=None, B=None, F=None, Q=None): + """ + Predict next state (prior) using the Kalman filter state propagation + equations and returns it without modifying the object. + Parameters + ---------- + u : np.array, default 0 + Optional control vector. + B : np.array(dim_x, dim_u), or None + Optional control transition matrix; a value of None + will cause the filter to use `self.B`. + F : np.array(dim_x, dim_x), or None + Optional state transition matrix; a value of None + will cause the filter to use `self.F`. + Q : np.array(dim_x, dim_x), scalar, or None + Optional process noise matrix; a value of None will cause the + filter to use `self.Q`. + Returns + ------- + (x, P) : tuple + State vector and covariance array of the prediction. + """ + + if B is None: + B = self.B + if F is None: + F = self.F + if Q is None: + Q = self.Q + elif isscalar(Q): + Q = eye(self.dim_x) * Q + + # x = Fx + Bu + if B is not None and u is not None: + x = dot(F, self.x) + dot(B, u) + else: + x = dot(F, self.x) + + # P = FPF' + Q + P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q + + return x, P + + def get_update(self, z=None): + """ + Computes the new estimate based on measurement `z` and returns it + without altering the state of the filter. + Parameters + ---------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + Returns + ------- + (x, P) : tuple + State vector and covariance array of the update. + """ + + if z is None: + return self.x, self.P + z = reshape_z(z, self.dim_z, self.x.ndim) + + R = self.R + H = self.H + P = self.P + x = self.x + + # error (residual) between measurement and prediction + y = z - dot(H, x) + + # common subexpression for speed + PHT = dot(P, H.T) + + # project system uncertainty into measurement space + S = dot(H, PHT) + R + + # map system uncertainty into kalman gain + K = dot(PHT, self.inv(S)) + + # predict new x with residual scaled by the kalman gain + x = x + dot(K, y) + + # P = (I-KH)P(I-KH)' + KRK' + I_KH = self._I - dot(K, H) + P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) + + return x, P + + def residual_of(self, z): + """ + Returns the residual for the given measurement (z). Does not alter + the state of the filter. + """ + z = reshape_z(z, self.dim_z, self.x.ndim) + return z - dot(self.H, self.x_prior) + + def measurement_of_state(self, x): + """ + Helper function that converts a state into a measurement. + Parameters + ---------- + x : np.array + kalman state vector + Returns + ------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. 
+ """ + + return dot(self.H, x) + + @property + def log_likelihood(self): + """ + log-likelihood of the last measurement. + """ + if self._log_likelihood is None: + self._log_likelihood = logpdf(x=self.y, cov=self.S) + return self._log_likelihood + + @property + def likelihood(self): + """ + Computed from the log-likelihood. The log-likelihood can be very + small, meaning a large negative value such as -28000. Taking the + exp() of that results in 0.0, which can break typical algorithms + which multiply by this value, so by default we always return a + number >= sys.float_info.min. + """ + if self._likelihood is None: + self._likelihood = exp(self.log_likelihood) + if self._likelihood == 0: + self._likelihood = sys.float_info.min + return self._likelihood + + @property + def mahalanobis(self): + """ " + Mahalanobis distance of measurement. E.g. 3 means measurement + was 3 standard deviations away from the predicted value. + Returns + ------- + mahalanobis : float + """ + if self._mahalanobis is None: + self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y))) + return self._mahalanobis + + @property + def alpha(self): + """ + Fading memory setting. 1.0 gives the normal Kalman filter, and + values slightly larger than 1.0 (such as 1.02) give a fading + memory effect - previous measurements have less influence on the + filter's estimates. This formulation of the Fading memory filter + (there are many) is due to Dan Simon [1]_. + """ + return self._alpha_sq**0.5 + + def log_likelihood_of(self, z): + """ + log likelihood of the measurement `z`. This should only be called + after a call to update(). Calling after predict() will yield an + incorrect result.""" + + if z is None: + return log(sys.float_info.min) + return logpdf(z, dot(self.H, self.x), self.S) + + @alpha.setter + def alpha(self, value): + if not np.isscalar(value) or value < 1: + raise ValueError("alpha must be a float greater than 1") + + self._alpha_sq = value**2 + + def __repr__(self): + return "\n".join( + [ + "KalmanFilter object", + pretty_str("dim_x", self.dim_x), + pretty_str("dim_z", self.dim_z), + pretty_str("dim_u", self.dim_u), + pretty_str("x", self.x), + pretty_str("P", self.P), + pretty_str("x_prior", self.x_prior), + pretty_str("P_prior", self.P_prior), + pretty_str("x_post", self.x_post), + pretty_str("P_post", self.P_post), + pretty_str("F", self.F), + pretty_str("Q", self.Q), + pretty_str("R", self.R), + pretty_str("H", self.H), + pretty_str("K", self.K), + pretty_str("y", self.y), + pretty_str("S", self.S), + pretty_str("SI", self.SI), + pretty_str("M", self.M), + pretty_str("B", self.B), + pretty_str("z", self.z), + pretty_str("log-likelihood", self.log_likelihood), + pretty_str("likelihood", self.likelihood), + pretty_str("mahalanobis", self.mahalanobis), + pretty_str("alpha", self.alpha), + pretty_str("inv", self.inv), + ] + ) + + def test_matrix_dimensions(self, z=None, H=None, R=None, F=None, Q=None): + """ + Performs a series of asserts to check that the size of everything + is what it should be. This can help you debug problems in your design. + If you pass in H, R, F, Q those will be used instead of this object's + value for those matrices. + Testing `z` (the measurement) is problamatic. x is a vector, and can be + implemented as either a 1D array or as a nx1 column vector. Thus Hx + can be of different shapes. Then, if Hx is a single value, it can + be either a 1D array or 2D vector. 
If either is true, z can reasonably + be a scalar (either '3' or np.array('3') are scalars under this + definition), a 1D, 1 element array, or a 2D, 1 element array. You are + allowed to pass in any combination that works. + """ + + if H is None: + H = self.H + if R is None: + R = self.R + if F is None: + F = self.F + if Q is None: + Q = self.Q + x = self.x + P = self.P + + assert x.ndim == 1 or x.ndim == 2, "x must have one or two dimensions, but has {}".format(x.ndim) + + if x.ndim == 1: + assert x.shape[0] == self.dim_x, "Shape of x must be ({},{}), but is {}".format(self.dim_x, 1, x.shape) + else: + assert x.shape == ( + self.dim_x, + 1, + ), "Shape of x must be ({},{}), but is {}".format(self.dim_x, 1, x.shape) + + assert P.shape == ( + self.dim_x, + self.dim_x, + ), "Shape of P must be ({},{}), but is {}".format(self.dim_x, self.dim_x, P.shape) + + assert Q.shape == ( + self.dim_x, + self.dim_x, + ), "Shape of Q must be ({},{}), but is {}".format(self.dim_x, self.dim_x, P.shape) + + assert F.shape == ( + self.dim_x, + self.dim_x, + ), "Shape of F must be ({},{}), but is {}".format(self.dim_x, self.dim_x, F.shape) + + assert np.ndim(H) == 2, "Shape of H must be (dim_z, {}), but is {}".format(P.shape[0], shape(H)) + + assert H.shape[1] == P.shape[0], "Shape of H must be (dim_z, {}), but is {}".format(P.shape[0], H.shape) + + # shape of R must be the same as HPH' + hph_shape = (H.shape[0], H.shape[0]) + r_shape = shape(R) + + if H.shape[0] == 1: + # r can be scalar, 1D, or 2D in this case + assert r_shape in [ + (), + (1,), + (1, 1), + ], "R must be scalar or one element array, but is shaped {}".format(r_shape) + else: + assert r_shape == hph_shape, "shape of R should be {} but it is {}".format(hph_shape, r_shape) + + if z is not None: + z_shape = shape(z) + else: + z_shape = (self.dim_z, 1) + + # H@x must have shape of z + Hx = dot(H, x) + + if z_shape == (): # scalar or np.array(scalar) + assert Hx.ndim == 1 or shape(Hx) == ( + 1, + 1, + ), "shape of z should be {}, not {} for the given H".format(shape(Hx), z_shape) + + elif shape(Hx) == (1,): + assert z_shape[0] == 1, "Shape of z must be {} for the given H".format(shape(Hx)) + + else: + assert z_shape == shape(Hx) or ( + len(z_shape) == 1 and shape(Hx) == (z_shape[0], 1) + ), "shape of z should be {}, not {} for the given H".format(shape(Hx), z_shape) + + if np.ndim(Hx) > 1 and shape(Hx) != (1, 1): + assert shape(Hx) == z_shape, "shape of z should be {} for the given H, but it is {}".format( + shape(Hx), z_shape + ) + + +def update(x, P, z, R, H=None, return_all=False): + """ + Add a new measurement (z) to the Kalman filter. If z is None, nothing + is changed. + This can handle either the multidimensional or unidimensional case. If + all parameters are floats instead of arrays the filter will still work, + and return floats for x, P as the result. + update(1, 2, 1, 1, 1) # univariate + update(x, P, 1 + Parameters + ---------- + x : numpy.array(dim_x, 1), or float + State estimate vector + P : numpy.array(dim_x, dim_x), or float + Covariance matrix + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + R : numpy.array(dim_z, dim_z), or float + Measurement noise matrix + H : numpy.array(dim_x, dim_x), or float, optional + Measurement function. If not provided, a value of 1 is assumed. + return_all : bool, default False + If true, y, K, S, and log_likelihood are returned, otherwise + only x and P are returned. 
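As an illustrative aside, the module-level predict() and update() defined in this file can be exercised on a toy constant-velocity model; the numbers below are invented and the only requirement is that filterpy is installed (reshape_z and logpdf come from it). With return_all=True the residual, gain, innovation covariance and log-likelihood come back alongside x and P:

    import numpy as np

    x = np.array([0., 1.])               # state: position, velocity
    P = np.diag([1., 1.])
    F = np.array([[1., 1.], [0., 1.]])   # constant-velocity transition, dt = 1
    Q = np.eye(2) * 1e-3
    H = np.array([[1., 0.]])             # only position is measured
    R = np.array([[0.5]])

    x, P = predict(x, P, F=F, Q=Q)
    x, P, y, K, S, ll = update(x, P, z=1.2, R=R, H=H, return_all=True)
    # y: residual, K: Kalman gain, S: innovation covariance, ll: log-likelihood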
+ Returns + ------- + x : numpy.array + Posterior state estimate vector + P : numpy.array + Posterior covariance matrix + y : numpy.array or scalar + Residua. Difference between measurement and state in measurement space + K : numpy.array + Kalman gain + S : numpy.array + System uncertainty in measurement space + log_likelihood : float + log likelihood of the measurement + """ + + # pylint: disable=bare-except + + if z is None: + if return_all: + return x, P, None, None, None, None + return x, P + + if H is None: + H = np.array([1]) + + if np.isscalar(H): + H = np.array([H]) + + Hx = np.atleast_1d(dot(H, x)) + z = reshape_z(z, Hx.shape[0], x.ndim) + + # error (residual) between measurement and prediction + y = z - Hx + + # project system uncertainty into measurement space + S = dot(dot(H, P), H.T) + R + + # map system uncertainty into kalman gain + try: + K = dot(dot(P, H.T), linalg.inv(S)) + except: + # can't invert a 1D array, annoyingly + K = dot(dot(P, H.T), 1.0 / S) + + # predict new x with residual scaled by the kalman gain + x = x + dot(K, y) + + # P = (I-KH)P(I-KH)' + KRK' + KH = dot(K, H) + + try: + I_KH = np.eye(KH.shape[0]) - KH + except: + I_KH = np.array([1 - KH]) + P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) + + if return_all: + # compute log likelihood + log_likelihood = logpdf(z, dot(H, x), S) + return x, P, y, K, S, log_likelihood + return x, P + + +def update_steadystate(x, z, K, H=None): + """ + Add a new measurement (z) to the Kalman filter. If z is None, nothing + is changed. + Parameters + ---------- + x : numpy.array(dim_x, 1), or float + State estimate vector + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + K : numpy.array, or float + Kalman gain matrix + H : numpy.array(dim_x, dim_x), or float, optional + Measurement function. If not provided, a value of 1 is assumed. + Returns + ------- + x : numpy.array + Posterior state estimate vector + Examples + -------- + This can handle either the multidimensional or unidimensional case. If + all parameters are floats instead of arrays the filter will still work, + and return floats for x, P as the result. + >>> update_steadystate(1, 2, 1) # univariate + >>> update_steadystate(x, P, z, H) + """ + + if z is None: + return x + + if H is None: + H = np.array([1]) + + if np.isscalar(H): + H = np.array([H]) + + Hx = np.atleast_1d(dot(H, x)) + z = reshape_z(z, Hx.shape[0], x.ndim) + + # error (residual) between measurement and prediction + y = z - Hx + + # estimate new x with residual scaled by the kalman gain + return x + dot(K, y) + + +def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.0): + """ + Predict next state (prior) using the Kalman filter state propagation + equations. + Parameters + ---------- + x : numpy.array + State estimate vector + P : numpy.array + Covariance matrix + F : numpy.array() + State Transition matrix + Q : numpy.array, Optional + Process noise matrix + u : numpy.array, Optional, default 0. + Control vector. If non-zero, it is multiplied by B + to create the control input into the system. + B : numpy.array, optional, default 0. + Control transition matrix. + alpha : float, Optional, default=1.0 + Fading memory setting. 1.0 gives the normal Kalman filter, and + values slightly larger than 1.0 (such as 1.02) give a fading + memory effect - previous measurements have less influence on the + filter's estimates. 
This formulation of the Fading memory filter + (there are many) is due to Dan Simon + Returns + ------- + x : numpy.array + Prior state estimate vector + P : numpy.array + Prior covariance matrix + """ + + if np.isscalar(F): + F = np.array(F) + x = dot(F, x) + dot(B, u) + P = (alpha * alpha) * dot(dot(F, P), F.T) + Q + + return x, P + + +def predict_steadystate(x, F=1, u=0, B=1): + """ + Predict next state (prior) using the Kalman filter state propagation + equations. This steady state form only computes x, assuming that the + covariance is constant. + Parameters + ---------- + x : numpy.array + State estimate vector + P : numpy.array + Covariance matrix + F : numpy.array() + State Transition matrix + u : numpy.array, Optional, default 0. + Control vector. If non-zero, it is multiplied by B + to create the control input into the system. + B : numpy.array, optional, default 0. + Control transition matrix. + Returns + ------- + x : numpy.array + Prior state estimate vector + """ + + if np.isscalar(F): + F = np.array(F) + x = dot(F, x) + dot(B, u) + + return x + + +def batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None, update_first=False, saver=None): + """ + Batch processes a sequences of measurements. + Parameters + ---------- + zs : list-like + list of measurements at each time step. Missing measurements must be + represented by None. + Fs : list-like + list of values to use for the state transition matrix matrix. + Qs : list-like + list of values to use for the process error + covariance. + Hs : list-like + list of values to use for the measurement matrix. + Rs : list-like + list of values to use for the measurement error + covariance. + Bs : list-like, optional + list of values to use for the control transition matrix; + a value of None in any position will cause the filter + to use `self.B` for that time step. + us : list-like, optional + list of values to use for the control input vector; + a value of None in any position will cause the filter to use + 0 for that time step. + update_first : bool, optional + controls whether the order of operations is update followed by + predict, or predict followed by update. Default is predict->update. + saver : filterpy.common.Saver, optional + filterpy.common.Saver object. If provided, saver.save() will be + called after every epoch + Returns + ------- + means : np.array((n,dim_x,1)) + array of the state for each time step after the update. Each entry + is an np.array. In other words `means[k,:]` is the state at step + `k`. + covariance : np.array((n,dim_x,dim_x)) + array of the covariances for each time step after the update. + In other words `covariance[k,:,:]` is the covariance at step `k`. + means_predictions : np.array((n,dim_x,1)) + array of the state for each time step after the predictions. Each + entry is an np.array. In other words `means[k,:]` is the state at + step `k`. + covariance_predictions : np.array((n,dim_x,dim_x)) + array of the covariances for each time step after the prediction. + In other words `covariance[k,:,:]` is the covariance at step `k`. + Examples + -------- + .. 
code-block:: Python + zs = [t + random.randn()*4 for t in range (40)] + Fs = [kf.F for t in range (40)] + Hs = [kf.H for t in range (40)] + (mu, cov, _, _) = kf.batch_filter(zs, Rs=R_list, Fs=Fs, Hs=Hs, Qs=None, + Bs=None, us=None, update_first=False) + (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs, Qs=None) + """ + + n = np.size(zs, 0) + dim_x = x.shape[0] + + # mean estimates from Kalman Filter + if x.ndim == 1: + means = zeros((n, dim_x)) + means_p = zeros((n, dim_x)) + else: + means = zeros((n, dim_x, 1)) + means_p = zeros((n, dim_x, 1)) + + # state covariances from Kalman Filter + covariances = zeros((n, dim_x, dim_x)) + covariances_p = zeros((n, dim_x, dim_x)) + + if us is None: + us = [0.0] * n + Bs = [0.0] * n + + if update_first: + for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): + + x, P = update(x, P, z, R=R, H=H) + means[i, :] = x + covariances[i, :, :] = P + + x, P = predict(x, P, u=u, B=B, F=F, Q=Q) + means_p[i, :] = x + covariances_p[i, :, :] = P + if saver is not None: + saver.save() + else: + for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): + + x, P = predict(x, P, u=u, B=B, F=F, Q=Q) + means_p[i, :] = x + covariances_p[i, :, :] = P + + x, P = update(x, P, z, R=R, H=H) + means[i, :] = x + covariances[i, :, :] = P + if saver is not None: + saver.save() + + return (means, covariances, means_p, covariances_p) + + +def rts_smoother(Xs, Ps, Fs, Qs): + """ + Runs the Rauch-Tung-Striebel Kalman smoother on a set of + means and covariances computed by a Kalman filter. The usual input + would come from the output of `KalmanFilter.batch_filter()`. + Parameters + ---------- + Xs : numpy.array + array of the means (state variable x) of the output of a Kalman + filter. + Ps : numpy.array + array of the covariances of the output of a kalman filter. + Fs : list-like collection of numpy.array + State transition matrix of the Kalman filter at each time step. + Qs : list-like collection of numpy.array, optional + Process noise of the Kalman filter at each time step. + Returns + ------- + x : numpy.ndarray + smoothed means + P : numpy.ndarray + smoothed state covariances + K : numpy.ndarray + smoother gain at each step + pP : numpy.ndarray + predicted state covariances + Examples + -------- + .. 
code-block:: Python + zs = [t + random.randn()*4 for t in range (40)] + (mu, cov, _, _) = kalman.batch_filter(zs) + (x, P, K, pP) = rts_smoother(mu, cov, kf.F, kf.Q) + """ + + if len(Xs) != len(Ps): + raise ValueError("length of Xs and Ps must be the same") + + n = Xs.shape[0] + dim_x = Xs.shape[1] + + # smoother gain + K = zeros((n, dim_x, dim_x)) + x, P, pP = Xs.copy(), Ps.copy(), Ps.copy() + + for k in range(n - 2, -1, -1): + pP[k] = dot(dot(Fs[k], P[k]), Fs[k].T) + Qs[k] + + # pylint: disable=bad-whitespace + K[k] = dot(dot(P[k], Fs[k].T), linalg.inv(pP[k])) + x[k] += dot(K[k], x[k + 1] - dot(Fs[k], x[k])) + P[k] += dot(dot(K[k], P[k + 1] - pP[k]), K[k].T) + + return (x, P, K, pP) diff --git a/feeder/trackers/deepocsort/ocsort.py b/feeder/trackers/deepocsort/ocsort.py new file mode 100644 index 0000000..a20f34a --- /dev/null +++ b/feeder/trackers/deepocsort/ocsort.py @@ -0,0 +1,670 @@ +""" + This script is adopted from the SORT script by Alex Bewley alex@bewley.ai +""" +from __future__ import print_function + +import pdb +import pickle + +import cv2 +import torch +import torchvision + +import numpy as np +from .association import * +from .embedding import EmbeddingComputer +from .cmc import CMCComputer +from reid_multibackend import ReIDDetectMultiBackend + + +def k_previous_obs(observations, cur_age, k): + if len(observations) == 0: + return [-1, -1, -1, -1, -1] + for i in range(k): + dt = k - i + if cur_age - dt in observations: + return observations[cur_age - dt] + max_age = max(observations.keys()) + return observations[max_age] + + +def convert_bbox_to_z(bbox): + """ + Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form + [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is + the aspect ratio + """ + w = bbox[2] - bbox[0] + h = bbox[3] - bbox[1] + x = bbox[0] + w / 2.0 + y = bbox[1] + h / 2.0 + s = w * h # scale is just area + r = w / float(h + 1e-6) + return np.array([x, y, s, r]).reshape((4, 1)) + + +def convert_bbox_to_z_new(bbox): + w = bbox[2] - bbox[0] + h = bbox[3] - bbox[1] + x = bbox[0] + w / 2.0 + y = bbox[1] + h / 2.0 + return np.array([x, y, w, h]).reshape((4, 1)) + + +def convert_x_to_bbox_new(x): + x, y, w, h = x.reshape(-1)[:4] + return np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2]).reshape(1, 4) + + +def convert_x_to_bbox(x, score=None): + """ + Takes a bounding box in the centre form [x,y,s,r] and returns it in the form + [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right + """ + w = np.sqrt(x[2] * x[3]) + h = x[2] / w + if score == None: + return np.array([x[0] - w / 2.0, x[1] - h / 2.0, x[0] + w / 2.0, x[1] + h / 2.0]).reshape((1, 4)) + else: + return np.array([x[0] - w / 2.0, x[1] - h / 2.0, x[0] + w / 2.0, x[1] + h / 2.0, score]).reshape((1, 5)) + + +def speed_direction(bbox1, bbox2): + cx1, cy1 = (bbox1[0] + bbox1[2]) / 2.0, (bbox1[1] + bbox1[3]) / 2.0 + cx2, cy2 = (bbox2[0] + bbox2[2]) / 2.0, (bbox2[1] + bbox2[3]) / 2.0 + speed = np.array([cy2 - cy1, cx2 - cx1]) + norm = np.sqrt((cy2 - cy1) ** 2 + (cx2 - cx1) ** 2) + 1e-6 + return speed / norm + + +def new_kf_process_noise(w, h, p=1 / 20, v=1 / 160): + Q = np.diag( + ((p * w) ** 2, (p * h) ** 2, (p * w) ** 2, (p * h) ** 2, (v * w) ** 2, (v * h) ** 2, (v * w) ** 2, (v * h) ** 2) + ) + return Q + + +def new_kf_measurement_noise(w, h, m=1 / 20): + w_var = (m * w) ** 2 + h_var = (m * h) ** 2 + R = np.diag((w_var, h_var, w_var, h_var)) + return R + + +class KalmanBoxTracker(object): + """ + This class represents the internal state of 
individual tracked objects observed as bbox. + """ + + count = 0 + + def __init__(self, bbox, cls, delta_t=3, orig=False, emb=None, alpha=0, new_kf=False): + """ + Initialises a tracker using initial bounding box. + + """ + # define constant velocity model + if not orig: + from .kalmanfilter import KalmanFilterNew as KalmanFilter + else: + from filterpy.kalman import KalmanFilter + self.cls = cls + self.conf = bbox[-1] + self.new_kf = new_kf + if new_kf: + self.kf = KalmanFilter(dim_x=8, dim_z=4) + self.kf.F = np.array( + [ + # x y w h x' y' w' h' + [1, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0, 1], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 1], + ] + ) + self.kf.H = np.array( + [ + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0], + ] + ) + _, _, w, h = convert_bbox_to_z_new(bbox).reshape(-1) + self.kf.P = new_kf_process_noise(w, h) + self.kf.P[:4, :4] *= 4 + self.kf.P[4:, 4:] *= 100 + # Process and measurement uncertainty happen in functions + self.bbox_to_z_func = convert_bbox_to_z_new + self.x_to_bbox_func = convert_x_to_bbox_new + else: + self.kf = KalmanFilter(dim_x=7, dim_z=4) + self.kf.F = np.array( + [ + # x y s r x' y' s' + [1, 0, 0, 0, 1, 0, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 0, 1], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 1], + ] + ) + self.kf.H = np.array( + [ + [1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + ] + ) + self.kf.R[2:, 2:] *= 10.0 + self.kf.P[4:, 4:] *= 1000.0 # give high uncertainty to the unobservable initial velocities + self.kf.P *= 10.0 + self.kf.Q[-1, -1] *= 0.01 + self.kf.Q[4:, 4:] *= 0.01 + self.bbox_to_z_func = convert_bbox_to_z + self.x_to_bbox_func = convert_x_to_bbox + + self.kf.x[:4] = self.bbox_to_z_func(bbox) + + self.time_since_update = 0 + self.id = KalmanBoxTracker.count + KalmanBoxTracker.count += 1 + self.history = [] + self.hits = 0 + self.hit_streak = 0 + self.age = 0 + """ + NOTE: [-1,-1,-1,-1,-1] is a compromising placeholder for non-observation status, the same for the return of + function k_previous_obs. It is ugly and I do not like it. But to support generate observation array in a + fast and unified way, which you would see below k_observations = np.array([k_previous_obs(...]]), let's bear it for now. + """ + # Used for OCR + self.last_observation = np.array([-1, -1, -1, -1, -1]) # placeholder + # Used to output track after min_hits reached + self.history_observations = [] + # Used for velocity + self.observations = dict() + self.velocity = None + self.delta_t = delta_t + + self.emb = emb + + self.frozen = False + + def update(self, bbox, cls): + """ + Updates the state vector with observed bbox. + """ + if bbox is not None: + self.frozen = False + self.cls = cls + if self.last_observation.sum() >= 0: # no previous observation + previous_box = None + for dt in range(self.delta_t, 0, -1): + if self.age - dt in self.observations: + previous_box = self.observations[self.age - dt] + break + if previous_box is None: + previous_box = self.last_observation + """ + Estimate the track speed direction with observations \Delta t steps away + """ + self.velocity = speed_direction(previous_box, bbox) + """ + Insert new observations. This is a ugly way to maintain both self.observations + and self.history_observations. Bear it for the moment. 
+ """ + self.last_observation = bbox + self.observations[self.age] = bbox + self.history_observations.append(bbox) + + self.time_since_update = 0 + self.history = [] + self.hits += 1 + self.hit_streak += 1 + if self.new_kf: + R = new_kf_measurement_noise(self.kf.x[2, 0], self.kf.x[3, 0]) + self.kf.update(self.bbox_to_z_func(bbox), R=R) + else: + self.kf.update(self.bbox_to_z_func(bbox)) + else: + self.kf.update(bbox) + self.frozen = True + + def update_emb(self, emb, alpha=0.9): + self.emb = alpha * self.emb + (1 - alpha) * emb + self.emb /= np.linalg.norm(self.emb) + + def get_emb(self): + return self.emb.cpu() + + def apply_affine_correction(self, affine): + m = affine[:, :2] + t = affine[:, 2].reshape(2, 1) + # For OCR + if self.last_observation.sum() > 0: + ps = self.last_observation[:4].reshape(2, 2).T + ps = m @ ps + t + self.last_observation[:4] = ps.T.reshape(-1) + + # Apply to each box in the range of velocity computation + for dt in range(self.delta_t, -1, -1): + if self.age - dt in self.observations: + ps = self.observations[self.age - dt][:4].reshape(2, 2).T + ps = m @ ps + t + self.observations[self.age - dt][:4] = ps.T.reshape(-1) + + # Also need to change kf state, but might be frozen + self.kf.apply_affine_correction(m, t, self.new_kf) + + def predict(self): + """ + Advances the state vector and returns the predicted bounding box estimate. + """ + # Don't allow negative bounding boxes + if self.new_kf: + if self.kf.x[2] + self.kf.x[6] <= 0: + self.kf.x[6] = 0 + if self.kf.x[3] + self.kf.x[7] <= 0: + self.kf.x[7] = 0 + + # Stop velocity, will update in kf during OOS + if self.frozen: + self.kf.x[6] = self.kf.x[7] = 0 + Q = new_kf_process_noise(self.kf.x[2, 0], self.kf.x[3, 0]) + else: + if (self.kf.x[6] + self.kf.x[2]) <= 0: + self.kf.x[6] *= 0.0 + Q = None + + self.kf.predict(Q=Q) + self.age += 1 + if self.time_since_update > 0: + self.hit_streak = 0 + self.time_since_update += 1 + self.history.append(self.x_to_bbox_func(self.kf.x)) + return self.history[-1] + + def get_state(self): + """ + Returns the current bounding box estimate. + """ + return self.x_to_bbox_func(self.kf.x) + + def mahalanobis(self, bbox): + """Should be run after a predict() call for accuracy.""" + return self.kf.md_for_measurement(self.bbox_to_z_func(bbox)) + + +""" + We support multiple ways for association cost calculation, by default + we use IoU. GIoU may have better performance in some situations. We note + that we hardly normalize the cost by all methods to (0,1) which may not be + the best practice. 
+""" +ASSO_FUNCS = { + "iou": iou_batch, + "giou": giou_batch, + "ciou": ciou_batch, + "diou": diou_batch, + "ct_dist": ct_dist, +} + + +class OCSort(object): + def __init__( + self, + model_weights, + device, + fp16, + det_thresh, + max_age=30, + min_hits=3, + iou_threshold=0.3, + delta_t=3, + asso_func="iou", + inertia=0.2, + w_association_emb=0.75, + alpha_fixed_emb=0.95, + aw_param=0.5, + embedding_off=False, + cmc_off=False, + aw_off=False, + new_kf_off=False, + **kwargs + ): + """ + Sets key parameters for SORT + """ + self.max_age = max_age + self.min_hits = min_hits + self.iou_threshold = iou_threshold + self.trackers = [] + self.frame_count = 0 + self.det_thresh = det_thresh + self.delta_t = delta_t + self.asso_func = ASSO_FUNCS[asso_func] + self.inertia = inertia + self.w_association_emb = w_association_emb + self.alpha_fixed_emb = alpha_fixed_emb + self.aw_param = aw_param + KalmanBoxTracker.count = 0 + + self.embedder = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16) + self.cmc = CMCComputer() + self.embedding_off = embedding_off + self.cmc_off = cmc_off + self.aw_off = aw_off + self.new_kf_off = new_kf_off + + def update(self, dets, img_numpy, tag='blub'): + """ + Params: + dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...] + Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections). + Returns the a similar array, where the last column is the object ID. + NOTE: The number of objects returned may differ from the number of detections provided. + """ + xyxys = dets[:, 0:4] + scores = dets[:, 4] + clss = dets[:, 5] + + classes = clss.numpy() + xyxys = xyxys.numpy() + scores = scores.numpy() + + dets = dets[:, 0:6].numpy() + remain_inds = scores > self.det_thresh + dets = dets[remain_inds] + self.height, self.width = img_numpy.shape[:2] + + # Rescale + #scale = min(img_tensor.shape[2] / img_numpy.shape[0], img_tensor.shape[3] / img_numpy.shape[1]) + #dets[:, :4] /= scale + + # Embedding + if self.embedding_off or dets.shape[0] == 0: + dets_embs = np.ones((dets.shape[0], 1)) + else: + # (Ndets x X) [512, 1024, 2048] + #dets_embs = self.embedder.compute_embedding(img_numpy, dets[:, :4], tag) + dets_embs = self._get_features(dets[:, :4], img_numpy) + + # CMC + if not self.cmc_off: + transform = self.cmc.compute_affine(img_numpy, dets[:, :4], tag) + for trk in self.trackers: + trk.apply_affine_correction(transform) + + trust = (dets[:, 4] - self.det_thresh) / (1 - self.det_thresh) + af = self.alpha_fixed_emb + # From [self.alpha_fixed_emb, 1], goes to 1 as detector is less confident + dets_alpha = af + (1 - af) * (1 - trust) + + # get predicted locations from existing trackers. 
+ trks = np.zeros((len(self.trackers), 5)) + trk_embs = [] + to_del = [] + ret = [] + for t, trk in enumerate(trks): + pos = self.trackers[t].predict()[0] + trk[:] = [pos[0], pos[1], pos[2], pos[3], 0] + if np.any(np.isnan(pos)): + to_del.append(t) + else: + trk_embs.append(self.trackers[t].get_emb()) + trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) + + if len(trk_embs) > 0: + trk_embs = np.vstack(trk_embs) + else: + trk_embs = np.array(trk_embs) + + for t in reversed(to_del): + self.trackers.pop(t) + + velocities = np.array([trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers]) + last_boxes = np.array([trk.last_observation for trk in self.trackers]) + k_observations = np.array([k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers]) + + """ + First round of association + """ + # (M detections X N tracks, final score) + if self.embedding_off or dets.shape[0] == 0 or trk_embs.shape[0] == 0: + stage1_emb_cost = None + else: + stage1_emb_cost = dets_embs @ trk_embs.T + matched, unmatched_dets, unmatched_trks = associate( + dets, + trks, + self.iou_threshold, + velocities, + k_observations, + self.inertia, + stage1_emb_cost, + self.w_association_emb, + self.aw_off, + self.aw_param, + ) + for m in matched: + self.trackers[m[1]].update(dets[m[0], :5], dets[m[0], 5]) + self.trackers[m[1]].update_emb(dets_embs[m[0]], alpha=dets_alpha[m[0]]) + + """ + Second round of associaton by OCR + """ + if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0: + left_dets = dets[unmatched_dets] + left_dets_embs = dets_embs[unmatched_dets] + left_trks = last_boxes[unmatched_trks] + left_trks_embs = trk_embs[unmatched_trks] + + iou_left = self.asso_func(left_dets, left_trks) + # TODO: is better without this + emb_cost_left = left_dets_embs @ left_trks_embs.T + if self.embedding_off: + emb_cost_left = np.zeros_like(emb_cost_left) + iou_left = np.array(iou_left) + if iou_left.max() > self.iou_threshold: + """ + NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may + get a higher performance especially on MOT17/MOT20 datasets. 
But we keep it + uniform here for simplicity + """ + rematched_indices = linear_assignment(-iou_left) + to_remove_det_indices = [] + to_remove_trk_indices = [] + for m in rematched_indices: + det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]] + if iou_left[m[0], m[1]] < self.iou_threshold: + continue + self.trackers[trk_ind].update(dets[det_ind, :5], dets[det_ind, 5]) + self.trackers[trk_ind].update_emb(dets_embs[det_ind], alpha=dets_alpha[det_ind]) + to_remove_det_indices.append(det_ind) + to_remove_trk_indices.append(trk_ind) + unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices)) + unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices)) + + for m in unmatched_trks: + self.trackers[m].update(None, None) + + # create and initialise new trackers for unmatched detections + for i in unmatched_dets: + trk = KalmanBoxTracker( + dets[i, :5], dets[i, 5], delta_t=self.delta_t, emb=dets_embs[i], alpha=dets_alpha[i], new_kf=not self.new_kf_off + ) + self.trackers.append(trk) + i = len(self.trackers) + for trk in reversed(self.trackers): + if trk.last_observation.sum() < 0: + d = trk.get_state()[0] + else: + """ + this is optional to use the recent observation or the kalman filter prediction, + we didn't notice significant difference here + """ + d = trk.last_observation[:4] + if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits): + # +1 as MOT benchmark requires positive + ret.append(np.concatenate((d, [trk.id + 1], [trk.cls], [trk.conf])).reshape(1, -1)) + i -= 1 + # remove dead tracklet + if trk.time_since_update > self.max_age: + self.trackers.pop(i) + if len(ret) > 0: + return np.concatenate(ret) + return np.empty((0, 5)) + + def _xywh_to_xyxy(self, bbox_xywh): + x, y, w, h = bbox_xywh + x1 = max(int(x - w / 2), 0) + x2 = min(int(x + w / 2), self.width - 1) + y1 = max(int(y - h / 2), 0) + y2 = min(int(y + h / 2), self.height - 1) + return x1, y1, x2, y2 + + def _get_features(self, bbox_xywh, ori_img): + im_crops = [] + for box in bbox_xywh: + x1, y1, x2, y2 = self._xywh_to_xyxy(box) + im = ori_img[y1:y2, x1:x2] + im_crops.append(im) + if im_crops: + features = self.embedder(im_crops).cpu() + else: + features = np.array([]) + + return features + + def update_public(self, dets, cates, scores): + self.frame_count += 1 + + det_scores = np.ones((dets.shape[0], 1)) + dets = np.concatenate((dets, det_scores), axis=1) + + remain_inds = scores > self.det_thresh + + cates = cates[remain_inds] + dets = dets[remain_inds] + + trks = np.zeros((len(self.trackers), 5)) + to_del = [] + ret = [] + for t, trk in enumerate(trks): + pos = self.trackers[t].predict()[0] + cat = self.trackers[t].cate + trk[:] = [pos[0], pos[1], pos[2], pos[3], cat] + if np.any(np.isnan(pos)): + to_del.append(t) + trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) + for t in reversed(to_del): + self.trackers.pop(t) + + velocities = np.array([trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers]) + last_boxes = np.array([trk.last_observation for trk in self.trackers]) + k_observations = np.array([k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers]) + + matched, unmatched_dets, unmatched_trks = associate_kitti( + dets, + trks, + cates, + self.iou_threshold, + velocities, + k_observations, + self.inertia, + ) + + for m in matched: + self.trackers[m[1]].update(dets[m[0], :]) + + if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0: + """ + The 
re-association stage by OCR. + NOTE: at this stage, adding other strategy might be able to continue improve + the performance, such as BYTE association by ByteTrack. + """ + left_dets = dets[unmatched_dets] + left_trks = last_boxes[unmatched_trks] + left_dets_c = left_dets.copy() + left_trks_c = left_trks.copy() + + iou_left = self.asso_func(left_dets_c, left_trks_c) + iou_left = np.array(iou_left) + det_cates_left = cates[unmatched_dets] + trk_cates_left = trks[unmatched_trks][:, 4] + num_dets = unmatched_dets.shape[0] + num_trks = unmatched_trks.shape[0] + cate_matrix = np.zeros((num_dets, num_trks)) + for i in range(num_dets): + for j in range(num_trks): + if det_cates_left[i] != trk_cates_left[j]: + """ + For some datasets, such as KITTI, there are different categories, + we have to avoid associate them together. + """ + cate_matrix[i][j] = -1e6 + iou_left = iou_left + cate_matrix + if iou_left.max() > self.iou_threshold - 0.1: + rematched_indices = linear_assignment(-iou_left) + to_remove_det_indices = [] + to_remove_trk_indices = [] + for m in rematched_indices: + det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]] + if iou_left[m[0], m[1]] < self.iou_threshold - 0.1: + continue + self.trackers[trk_ind].update(dets[det_ind, :]) + to_remove_det_indices.append(det_ind) + to_remove_trk_indices.append(trk_ind) + unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices)) + unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices)) + + for i in unmatched_dets: + trk = KalmanBoxTracker(dets[i, :]) + trk.cate = cates[i] + self.trackers.append(trk) + i = len(self.trackers) + + for trk in reversed(self.trackers): + if trk.last_observation.sum() > 0: + d = trk.last_observation[:4] + else: + d = trk.get_state()[0] + if trk.time_since_update < 1: + if (self.frame_count <= self.min_hits) or (trk.hit_streak >= self.min_hits): + # id+1 as MOT benchmark requires positive + ret.append(np.concatenate((d, [trk.id + 1], [trk.cls], [trk.conf])).reshape(1, -1)) + if trk.hit_streak == self.min_hits: + # Head Padding (HP): recover the lost steps during initializing the track + for prev_i in range(self.min_hits - 1): + prev_observation = trk.history_observations[-(prev_i + 2)] + ret.append( + ( + np.concatenate( + ( + prev_observation[:4], + [trk.id + 1], + [trk.cls], + [trk.conf], + ) + ) + ).reshape(1, -1) + ) + i -= 1 + if trk.time_since_update > self.max_age: + self.trackers.pop(i) + + if len(ret) > 0: + return np.concatenate(ret) + return np.empty((0, 7)) + + def dump_cache(self): + self.cmc.dump_cache() + self.embedder.dump_cache() diff --git a/feeder/trackers/deepocsort/reid_multibackend.py b/feeder/trackers/deepocsort/reid_multibackend.py new file mode 100644 index 0000000..6578177 --- /dev/null +++ b/feeder/trackers/deepocsort/reid_multibackend.py @@ -0,0 +1,237 @@ +import torch.nn as nn +import torch +from pathlib import Path +import numpy as np +from itertools import islice +import torchvision.transforms as transforms +import cv2 +import sys +import torchvision.transforms as T +from collections import OrderedDict, namedtuple +import gdown +from os.path import exists as file_exists + + +from yolov8.ultralytics.yolo.utils.checks import check_requirements, check_version +from yolov8.ultralytics.yolo.utils import LOGGER +from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name, + download_url, load_pretrained_weights) +from trackers.strongsort.deep.models import build_model + + +def 
check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +class ReIDDetectMultiBackend(nn.Module): + # ReID models MultiBackend class for python inference on various backends + def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False): + super().__init__() + + w = weights[0] if isinstance(weights, list) else weights + self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w) # get backend + self.fp16 = fp16 + self.fp16 &= self.pt or self.jit or self.engine # FP16 + + # Build transform functions + self.device = device + self.image_size=(256, 128) + self.pixel_mean=[0.485, 0.456, 0.406] + self.pixel_std=[0.229, 0.224, 0.225] + self.transforms = [] + self.transforms += [T.Resize(self.image_size)] + self.transforms += [T.ToTensor()] + self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)] + self.preprocess = T.Compose(self.transforms) + self.to_pil = T.ToPILImage() + + model_name = get_model_name(w) + + if w.suffix == '.pt': + model_url = get_model_url(w) + if not file_exists(w) and model_url is not None: + gdown.download(model_url, str(w), quiet=False) + elif file_exists(w): + pass + else: + print(f'No URL associated to the chosen StrongSORT weights ({w}). Choose between:') + show_downloadeable_models() + exit() + + # Build model + self.model = build_model( + model_name, + num_classes=1, + pretrained=not (w and w.is_file()), + use_gpu=device + ) + + if self.pt: # PyTorch + # populate model arch with weights + if w and w.is_file() and w.suffix == '.pt': + load_pretrained_weights(self.model, w) + + self.model.to(device).eval() + self.model.half() if self.fp16 else self.model.float() + elif self.jit: + LOGGER.info(f'Loading {w} for TorchScript inference...') + self.model = torch.jit.load(w) + self.model.half() if self.fp16 else self.model.float() + elif self.onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + cuda = torch.cuda.is_available() and device.type != 'cpu' + #check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + import onnxruntime + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + self.session = onnxruntime.InferenceSession(str(w), providers=providers) + elif self.engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + if device.type == 'cpu': + device = torch.device('cuda:0') + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + self.model_ = runtime.deserialize_cuda_engine(f.read()) + self.context = self.model_.create_execution_context() + self.bindings = OrderedDict() + self.fp16 = False # default updated below + dynamic = False + for index in range(self.model_.num_bindings): + name = self.model_.get_binding_name(index) + dtype = trt.nptype(self.model_.get_binding_dtype(index)) + if self.model_.binding_is_input(index): + if -1 in tuple(self.model_.get_binding_shape(index)): # dynamic + dynamic = True + 
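                        # For a dynamic input binding, the call below pins the execution
                        # context to the optimisation profile's maximum shape
                        # (get_profile_shape returns min/opt/max shapes), so the buffer
                        # allocated further down from context.get_binding_shape() is large
                        # enough for any runtime batch size.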
self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2])) + if dtype == np.float16: + self.fp16 = True + shape = tuple(self.context.get_binding_shape(index)) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) + self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) + batch_size = self.bindings['images'].shape[0] # if dynamic, this is instead max batch size + elif self.xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + from openvino.runtime import Core, Layout, get_batch + ie = Core() + if not Path(w).is_file(): # if not *.xml + w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir + network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) + if network.get_parameters()[0].get_layout().empty: + network.get_parameters()[0].set_layout(Layout("NCWH")) + batch_dim = get_batch(network) + if batch_dim.is_static: + batch_size = batch_dim.get_length() + self.executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 + self.output_layer = next(iter(self.executable_network.outputs)) + + elif self.tflite: + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + self.interpreter = tf.lite.Interpreter(model_path=w) + self.interpreter.allocate_tensors() + # Get input and output tensors. + self.input_details = self.interpreter.get_input_details() + self.output_details = self.interpreter.get_output_details() + + # Test model on random input data. + input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32) + self.interpreter.set_tensor(self.input_details[0]['index'], input_data) + + self.interpreter.invoke() + + # The function `get_tensor()` returns a copy of the tensor data. + output_data = self.interpreter.get_tensor(self.output_details[0]['index']) + else: + print('This model framework is not supported yet!') + exit() + + + @staticmethod + def model_type(p='path/to/model.pt'): + # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx + from trackers.reid_export import export_formats + sf = list(export_formats().Suffix) # export suffixes + check_suffix(p, sf) # checks + types = [s in Path(p).name for s in sf] + return types + + def _preprocess(self, im_batch): + + images = [] + for element in im_batch: + image = self.to_pil(element) + image = self.preprocess(image) + images.append(image) + + images = torch.stack(images, dim=0) + images = images.to(self.device) + + return images + + + def forward(self, im_batch): + + # preprocess batch + im_batch = self._preprocess(im_batch) + + # batch to half + if self.fp16 and im_batch.dtype != torch.float16: + im_batch = im_batch.half() + + # batch processing + features = [] + if self.pt: + features = self.model(im_batch) + elif self.jit: # TorchScript + features = self.model(im_batch) + elif self.onnx: # ONNX Runtime + im_batch = im_batch.cpu().numpy() # torch to numpy + features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0] + elif self.engine: # TensorRT + if True and im_batch.shape != self.bindings['images'].shape: + i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output')) + self.context.set_binding_shape(i_in, im_batch.shape) # reshape if dynamic + self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape) + self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) + s = self.bindings['images'].shape + assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" + self.binding_addrs['images'] = int(im_batch.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + features = self.bindings['output'].data + elif self.xml: # OpenVINO + im_batch = im_batch.cpu().numpy() # FP32 + features = self.executable_network([im_batch])[self.output_layer] + else: + print('Framework not supported at the moment, we are working on it...') + exit() + + if isinstance(features, (list, tuple)): + return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features] + else: + return self.from_numpy(features) + + def from_numpy(self, x): + return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x + + def warmup(self, imgsz=[(256, 128, 3)]): + # Warmup model by running inference once + warmup_types = self.pt, self.jit, self.onnx, self.engine, self.tflite + if any(warmup_types) and self.device.type != 'cpu': + im = [np.empty(*imgsz).astype(np.uint8)] # input + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup \ No newline at end of file diff --git a/feeder/trackers/multi_tracker_zoo.py b/feeder/trackers/multi_tracker_zoo.py new file mode 100644 index 0000000..5dedf3e --- /dev/null +++ b/feeder/trackers/multi_tracker_zoo.py @@ -0,0 +1,84 @@ +from trackers.strongsort.utils.parser import get_config + +def create_tracker(tracker_type, tracker_config, reid_weights, device, half): + + cfg = get_config() + cfg.merge_from_file(tracker_config) + + if tracker_type == 'strongsort': + from trackers.strongsort.strong_sort import StrongSORT + strongsort = StrongSORT( + reid_weights, + device, + half, + max_dist=cfg.strongsort.max_dist, + max_iou_dist=cfg.strongsort.max_iou_dist, + max_age=cfg.strongsort.max_age, + max_unmatched_preds=cfg.strongsort.max_unmatched_preds, + n_init=cfg.strongsort.n_init, + nn_budget=cfg.strongsort.nn_budget, + mc_lambda=cfg.strongsort.mc_lambda, + 
ema_alpha=cfg.strongsort.ema_alpha, + + ) + return strongsort + + elif tracker_type == 'ocsort': + from trackers.ocsort.ocsort import OCSort + ocsort = OCSort( + det_thresh=cfg.ocsort.det_thresh, + max_age=cfg.ocsort.max_age, + min_hits=cfg.ocsort.min_hits, + iou_threshold=cfg.ocsort.iou_thresh, + delta_t=cfg.ocsort.delta_t, + asso_func=cfg.ocsort.asso_func, + inertia=cfg.ocsort.inertia, + use_byte=cfg.ocsort.use_byte, + ) + return ocsort + + elif tracker_type == 'bytetrack': + from trackers.bytetrack.byte_tracker import BYTETracker + bytetracker = BYTETracker( + track_thresh=cfg.bytetrack.track_thresh, + match_thresh=cfg.bytetrack.match_thresh, + track_buffer=cfg.bytetrack.track_buffer, + frame_rate=cfg.bytetrack.frame_rate + ) + return bytetracker + + elif tracker_type == 'botsort': + from trackers.botsort.bot_sort import BoTSORT + botsort = BoTSORT( + reid_weights, + device, + half, + track_high_thresh=cfg.botsort.track_high_thresh, + new_track_thresh=cfg.botsort.new_track_thresh, + track_buffer =cfg.botsort.track_buffer, + match_thresh=cfg.botsort.match_thresh, + proximity_thresh=cfg.botsort.proximity_thresh, + appearance_thresh=cfg.botsort.appearance_thresh, + cmc_method =cfg.botsort.cmc_method, + frame_rate=cfg.botsort.frame_rate, + lambda_=cfg.botsort.lambda_ + ) + return botsort + elif tracker_type == 'deepocsort': + from trackers.deepocsort.ocsort import OCSort + botsort = OCSort( + reid_weights, + device, + half, + det_thresh=cfg.deepocsort.det_thresh, + max_age=cfg.deepocsort.max_age, + min_hits=cfg.deepocsort.min_hits, + iou_threshold=cfg.deepocsort.iou_thresh, + delta_t=cfg.deepocsort.delta_t, + asso_func=cfg.deepocsort.asso_func, + inertia=cfg.deepocsort.inertia, + ) + return botsort + else: + print('No such tracker') + exit() \ No newline at end of file diff --git a/feeder/trackers/ocsort/association.py b/feeder/trackers/ocsort/association.py new file mode 100644 index 0000000..64c2a3e --- /dev/null +++ b/feeder/trackers/ocsort/association.py @@ -0,0 +1,377 @@ +import os +import numpy as np + + +def iou_batch(bboxes1, bboxes2): + """ + From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2] + """ + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) + yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) + xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) + yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) + w = np.maximum(0., xx2 - xx1) + h = np.maximum(0., yy2 - yy1) + wh = w * h + o = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh) + return(o) + + +def giou_batch(bboxes1, bboxes2): + """ + :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) + :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) + :return: + """ + # for details should go to https://arxiv.org/pdf/1902.09630.pdf + # ensure predict's bbox form + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) + yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) + xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) + yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) + w = np.maximum(0., xx2 - xx1) + h = np.maximum(0., yy2 - yy1) + wh = w * h + iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh) + + xxc1 = np.minimum(bboxes1[..., 
0], bboxes2[..., 0]) + yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) + xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) + yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) + wc = xxc2 - xxc1 + hc = yyc2 - yyc1 + assert((wc > 0).all() and (hc > 0).all()) + area_enclose = wc * hc + giou = iou - (area_enclose - wh) / area_enclose + giou = (giou + 1.)/2.0 # resize from (-1,1) to (0,1) + return giou + + +def diou_batch(bboxes1, bboxes2): + """ + :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) + :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) + :return: + """ + # for details should go to https://arxiv.org/pdf/1902.09630.pdf + # ensure predict's bbox form + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + # calculate the intersection box + xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) + yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) + xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) + yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) + w = np.maximum(0., xx2 - xx1) + h = np.maximum(0., yy2 - yy1) + wh = w * h + iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh) + + centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 + centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 + centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 + centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 + + inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 + + xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) + yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) + xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) + yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) + + outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2 + diou = iou - inner_diag / outer_diag + + return (diou + 1) / 2.0 # resize from (-1,1) to (0,1) + +def ciou_batch(bboxes1, bboxes2): + """ + :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) + :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) + :return: + """ + # for details should go to https://arxiv.org/pdf/1902.09630.pdf + # ensure predict's bbox form + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + # calculate the intersection box + xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) + yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) + xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) + yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) + w = np.maximum(0., xx2 - xx1) + h = np.maximum(0., yy2 - yy1) + wh = w * h + iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh) + + centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 + centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 + centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 + centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 + + inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 + + xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) + yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) + xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) + yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) + + outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2 + + w1 = bboxes1[..., 2] - bboxes1[..., 0] + h1 = bboxes1[..., 3] - bboxes1[..., 1] + w2 = bboxes2[..., 2] - bboxes2[..., 0] + h2 = bboxes2[..., 3] - bboxes2[..., 1] + + # prevent dividing over zero. 
add one pixel shift + h2 = h2 + 1. + h1 = h1 + 1. + arctan = np.arctan(w2/h2) - np.arctan(w1/h1) + v = (4 / (np.pi ** 2)) * (arctan ** 2) + S = 1 - iou + alpha = v / (S+v) + ciou = iou - inner_diag / outer_diag - alpha * v + + return (ciou + 1) / 2.0 # resize from (-1,1) to (0,1) + + +def ct_dist(bboxes1, bboxes2): + """ + Measure the center distance between two sets of bounding boxes, + this is a coarse implementation, we don't recommend using it only + for association, which can be unstable and sensitive to frame rate + and object speed. + """ + bboxes2 = np.expand_dims(bboxes2, 0) + bboxes1 = np.expand_dims(bboxes1, 1) + + centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 + centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 + centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 + centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 + + ct_dist2 = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 + + ct_dist = np.sqrt(ct_dist2) + + # The linear rescaling is a naive version and needs more study + ct_dist = ct_dist / ct_dist.max() + return ct_dist.max() - ct_dist # resize to (0,1) + + + +def speed_direction_batch(dets, tracks): + tracks = tracks[..., np.newaxis] + CX1, CY1 = (dets[:,0] + dets[:,2])/2.0, (dets[:,1]+dets[:,3])/2.0 + CX2, CY2 = (tracks[:,0] + tracks[:,2]) /2.0, (tracks[:,1]+tracks[:,3])/2.0 + dx = CX1 - CX2 + dy = CY1 - CY2 + norm = np.sqrt(dx**2 + dy**2) + 1e-6 + dx = dx / norm + dy = dy / norm + return dy, dx # size: num_track x num_det + + +def linear_assignment(cost_matrix): + try: + import lap + _, x, y = lap.lapjv(cost_matrix, extend_cost=True) + return np.array([[y[i],i] for i in x if i >= 0]) # + except ImportError: + from scipy.optimize import linear_sum_assignment + x, y = linear_sum_assignment(cost_matrix) + return np.array(list(zip(x, y))) + + +def associate_detections_to_trackers(detections,trackers, iou_threshold = 0.3): + """ + Assigns detections to tracked object (both represented as bounding boxes) + Returns 3 lists of matches, unmatched_detections and unmatched_trackers + """ + if(len(trackers)==0): + return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int) + + iou_matrix = iou_batch(detections, trackers) + + if min(iou_matrix.shape) > 0: + a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() == 1: + matched_indices = np.stack(np.where(a), axis=1) + else: + matched_indices = linear_assignment(-iou_matrix) + else: + matched_indices = np.empty(shape=(0,2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if(d not in matched_indices[:,0]): + unmatched_detections.append(d) + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if(t not in matched_indices[:,1]): + unmatched_trackers.append(t) + + #filter out matched with low IOU + matches = [] + for m in matched_indices: + if(iou_matrix[m[0], m[1]] 0: + a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() == 1: + matched_indices = np.stack(np.where(a), axis=1) + else: + matched_indices = linear_assignment(-(iou_matrix+angle_diff_cost)) + else: + matched_indices = np.empty(shape=(0,2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if(d not in matched_indices[:,0]): + unmatched_detections.append(d) + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if(t not in matched_indices[:,1]): + unmatched_trackers.append(t) + + # filter out matched with low IOU + matches = [] + for m in matched_indices: + if(iou_matrix[m[0], m[1]] 0: 
+ a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() == 1: + matched_indices = np.stack(np.where(a), axis=1) + else: + matched_indices = linear_assignment(cost_matrix) + else: + matched_indices = np.empty(shape=(0,2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if(d not in matched_indices[:,0]): + unmatched_detections.append(d) + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if(t not in matched_indices[:,1]): + unmatched_trackers.append(t) + + #filter out matched with low IOU + matches = [] + for m in matched_indices: + if(iou_matrix[m[0], m[1]]update cycle. The +predict step, implemented with the method or function predict(), +uses the state transition matrix F to predict the state in the next +time period (epoch). The state is stored as a gaussian (x, P), where +x is the state (column) vector, and P is its covariance. Covariance +matrix Q specifies the process covariance. In Bayesian terms, this +prediction is called the *prior*, which you can think of colloquially +as the estimate prior to incorporating the measurement. +The update step, implemented with the method or function `update()`, +incorporates the measurement z with covariance R, into the state +estimate (x, P). The class stores the system uncertainty in S, +the innovation (residual between prediction and measurement in +measurement space) in y, and the Kalman gain in k. The procedural +form returns these variables to you. In Bayesian terms this computes +the *posterior* - the estimate after the information from the +measurement is incorporated. +Whether you use the OO form or procedural form is up to you. If +matrices such as H, R, and F are changing each epoch, you'll probably +opt to use the procedural form. If they are unchanging, the OO +form is perhaps easier to use since you won't need to keep track +of these matrices. This is especially useful if you are implementing +banks of filters or comparing various KF designs for performance; +a trivial coding bug could lead to using the wrong sets of matrices. +This module also offers an implementation of the RTS smoother, and +other helper functions, such as log likelihood computations. +The Saver class allows you to easily save the state of the +KalmanFilter class after every update +This module expects NumPy arrays for all values that expect +arrays, although in a few cases, particularly method parameters, +it will accept types that convert to NumPy arrays, such as lists +of lists. These exceptions are documented in the method or function. +Examples +-------- +The following example constructs a constant velocity kinematic +filter, filters noisy data, and plots the results. It also demonstrates +using the Saver class to save the state of the filter at each epoch. +.. 
code-block:: Python + import matplotlib.pyplot as plt + import numpy as np + from filterpy.kalman import KalmanFilter + from filterpy.common import Q_discrete_white_noise, Saver + r_std, q_std = 2., 0.003 + cv = KalmanFilter(dim_x=2, dim_z=1) + cv.x = np.array([[0., 1.]]) # position, velocity + cv.F = np.array([[1, dt],[ [0, 1]]) + cv.R = np.array([[r_std^^2]]) + f.H = np.array([[1., 0.]]) + f.P = np.diag([.1^^2, .03^^2) + f.Q = Q_discrete_white_noise(2, dt, q_std**2) + saver = Saver(cv) + for z in range(100): + cv.predict() + cv.update([z + randn() * r_std]) + saver.save() # save the filter's state + saver.to_array() + plt.plot(saver.x[:, 0]) + # plot all of the priors + plt.plot(saver.x_prior[:, 0]) + # plot mahalanobis distance + plt.figure() + plt.plot(saver.mahalanobis) +This code implements the same filter using the procedural form + x = np.array([[0., 1.]]) # position, velocity + F = np.array([[1, dt],[ [0, 1]]) + R = np.array([[r_std^^2]]) + H = np.array([[1., 0.]]) + P = np.diag([.1^^2, .03^^2) + Q = Q_discrete_white_noise(2, dt, q_std**2) + for z in range(100): + x, P = predict(x, P, F=F, Q=Q) + x, P = update(x, P, z=[z + randn() * r_std], R=R, H=H) + xs.append(x[0, 0]) + plt.plot(xs) +For more examples see the test subdirectory, or refer to the +book cited below. In it I both teach Kalman filtering from basic +principles, and teach the use of this library in great detail. +FilterPy library. +http://github.com/rlabbe/filterpy +Documentation at: +https://filterpy.readthedocs.org +Supporting book at: +https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python +This is licensed under an MIT license. See the readme.MD file +for more information. +Copyright 2014-2018 Roger R Labbe Jr. +""" + +from __future__ import absolute_import, division + +from copy import deepcopy +from math import log, exp, sqrt +import sys +import numpy as np +from numpy import dot, zeros, eye, isscalar, shape +import numpy.linalg as linalg +from filterpy.stats import logpdf +from filterpy.common import pretty_str, reshape_z + + +class KalmanFilterNew(object): + """ Implements a Kalman filter. You are responsible for setting the + various state variables to reasonable values; the defaults will + not give you a functional filter. + For now the best documentation is my free book Kalman and Bayesian + Filters in Python [2]_. The test files in this directory also give you a + basic idea of use, albeit without much description. + In brief, you will first construct this object, specifying the size of + the state vector with dim_x and the size of the measurement vector that + you will be using with dim_z. These are mostly used to perform size checks + when you assign values to the various matrices. For example, if you + specified dim_z=2 and then try to assign a 3x3 matrix to R (the + measurement noise matrix you will get an assert exception because R + should be 2x2. (If for whatever reason you need to alter the size of + things midstream just use the underscore version of the matrices to + assign directly: your_filter._R = a_3x3_matrix.) + After construction the filter will have default matrices created for you, + but you must specify the values for each. It’s usually easiest to just + overwrite them rather than assign to each element yourself. This will be + clearer in the example below. All are of type numpy.array. + Examples + -------- + Here is a filter that tracks position and velocity using a sensor that only + reads position. + First construct the object with the required dimensionality. 
Here the state + (`dim_x`) has 2 coefficients (position and velocity), and the measurement + (`dim_z`) has one. In FilterPy `x` is the state, `z` is the measurement. + .. code:: + from filterpy.kalman import KalmanFilter + f = KalmanFilter (dim_x=2, dim_z=1) + Assign the initial value for the state (position and velocity). You can do this + with a two dimensional array like so: + .. code:: + f.x = np.array([[2.], # position + [0.]]) # velocity + or just use a one dimensional array, which I prefer doing. + .. code:: + f.x = np.array([2., 0.]) + Define the state transition matrix: + .. code:: + f.F = np.array([[1.,1.], + [0.,1.]]) + Define the measurement function. Here we need to convert a position-velocity + vector into just a position vector, so we use: + .. code:: + f.H = np.array([[1., 0.]]) + Define the state's covariance matrix P. + .. code:: + f.P = np.array([[1000., 0.], + [ 0., 1000.] ]) + Now assign the measurement noise. Here the dimension is 1x1, so I can + use a scalar + .. code:: + f.R = 5 + I could have done this instead: + .. code:: + f.R = np.array([[5.]]) + Note that this must be a 2 dimensional array. + Finally, I will assign the process noise. Here I will take advantage of + another FilterPy library function: + .. code:: + from filterpy.common import Q_discrete_white_noise + f.Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13) + Now just perform the standard predict/update loop: + .. code:: + while some_condition_is_true: + z = get_sensor_reading() + f.predict() + f.update(z) + do_something_with_estimate (f.x) + **Procedural Form** + This module also contains stand alone functions to perform Kalman filtering. + Use these if you are not a fan of objects. + **Example** + .. code:: + while True: + z, R = read_sensor() + x, P = predict(x, P, F, Q) + x, P = update(x, P, z, R, H) + See my book Kalman and Bayesian Filters in Python [2]_. + You will have to set the following attributes after constructing this + object for the filter to perform properly. Please note that there are + various checks in place to ensure that you have made everything the + 'correct' size. However, it is possible to provide incorrectly sized + arrays such that the linear algebra can not perform an operation. + It can also fail silently - you can end up with matrices of a size that + allows the linear algebra to work, but are the wrong shape for the problem + you are trying to solve. + Parameters + ---------- + dim_x : int + Number of state variables for the Kalman filter. For example, if + you are tracking the position and velocity of an object in two + dimensions, dim_x would be 4. + This is used to set the default size of P, Q, and u + dim_z : int + Number of of measurement inputs. For example, if the sensor + provides you with position in (x,y), dim_z would be 2. + dim_u : int (optional) + size of the control input, if it is being used. + Default value of 0 indicates it is not used. + compute_log_likelihood : bool (default = True) + Computes log likelihood by default, but this can be a slow + computation, so if you never use it you can turn this computation + off. + Attributes + ---------- + x : numpy.array(dim_x, 1) + Current state estimate. Any call to update() or predict() updates + this variable. + P : numpy.array(dim_x, dim_x) + Current state covariance matrix. Any call to update() or predict() + updates this variable. + x_prior : numpy.array(dim_x, 1) + Prior (predicted) state estimate. 
The *_prior and *_post attributes + are for convenience; they store the prior and posterior of the + current epoch. Read Only. + P_prior : numpy.array(dim_x, dim_x) + Prior (predicted) state covariance matrix. Read Only. + x_post : numpy.array(dim_x, 1) + Posterior (updated) state estimate. Read Only. + P_post : numpy.array(dim_x, dim_x) + Posterior (updated) state covariance matrix. Read Only. + z : numpy.array + Last measurement used in update(). Read only. + R : numpy.array(dim_z, dim_z) + Measurement noise covariance matrix. Also known as the + observation covariance. + Q : numpy.array(dim_x, dim_x) + Process noise covariance matrix. Also known as the transition + covariance. + F : numpy.array() + State Transition matrix. Also known as `A` in some formulation. + H : numpy.array(dim_z, dim_x) + Measurement function. Also known as the observation matrix, or as `C`. + y : numpy.array + Residual of the update step. Read only. + K : numpy.array(dim_x, dim_z) + Kalman gain of the update step. Read only. + S : numpy.array + System uncertainty (P projected to measurement space). Read only. + SI : numpy.array + Inverse system uncertainty. Read only. + log_likelihood : float + log-likelihood of the last measurement. Read only. + likelihood : float + likelihood of last measurement. Read only. + Computed from the log-likelihood. The log-likelihood can be very + small, meaning a large negative value such as -28000. Taking the + exp() of that results in 0.0, which can break typical algorithms + which multiply by this value, so by default we always return a + number >= sys.float_info.min. + mahalanobis : float + mahalanobis distance of the innovation. Read only. + inv : function, default numpy.linalg.inv + If you prefer another inverse function, such as the Moore-Penrose + pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv + This is only used to invert self.S. If you know it is diagonal, you + might choose to set it to filterpy.common.inv_diagonal, which is + several times faster than numpy.linalg.inv for diagonal matrices. + alpha : float + Fading memory setting. 1.0 gives the normal Kalman filter, and + values slightly larger than 1.0 (such as 1.02) give a fading + memory effect - previous measurements have less influence on the + filter's estimates. This formulation of the Fading memory filter + (there are many) is due to Dan Simon [1]_. + References + ---------- + .. [1] Dan Simon. "Optimal State Estimation." John Wiley & Sons. + p. 208-212. (2006) + .. [2] Roger Labbe. "Kalman and Bayesian Filters in Python" + https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python + """ + + def __init__(self, dim_x, dim_z, dim_u=0): + if dim_x < 1: + raise ValueError('dim_x must be 1 or greater') + if dim_z < 1: + raise ValueError('dim_z must be 1 or greater') + if dim_u < 0: + raise ValueError('dim_u must be 0 or greater') + + self.dim_x = dim_x + self.dim_z = dim_z + self.dim_u = dim_u + + self.x = zeros((dim_x, 1)) # state + self.P = eye(dim_x) # uncertainty covariance + self.Q = eye(dim_x) # process uncertainty + self.B = None # control transition matrix + self.F = eye(dim_x) # state transition matrix + self.H = zeros((dim_z, dim_x)) # measurement function + self.R = eye(dim_z) # measurement uncertainty + self._alpha_sq = 1. # fading memory control + self.M = np.zeros((dim_x, dim_z)) # process-measurement cross correlation + self.z = np.array([[None]*self.dim_z]).T + + # gain and residual are computed during the innovation step. 
We + # save them so that in case you want to inspect them for various + # purposes + self.K = np.zeros((dim_x, dim_z)) # kalman gain + self.y = zeros((dim_z, 1)) + self.S = np.zeros((dim_z, dim_z)) # system uncertainty + self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty + + # identity matrix. Do not alter this. + self._I = np.eye(dim_x) + + # these will always be a copy of x,P after predict() is called + self.x_prior = self.x.copy() + self.P_prior = self.P.copy() + + # these will always be a copy of x,P after update() is called + self.x_post = self.x.copy() + self.P_post = self.P.copy() + + # Only computed only if requested via property + self._log_likelihood = log(sys.float_info.min) + self._likelihood = sys.float_info.min + self._mahalanobis = None + + # keep all observations + self.history_obs = [] + + self.inv = np.linalg.inv + + self.attr_saved = None + self.observed = False + + + def predict(self, u=None, B=None, F=None, Q=None): + """ + Predict next state (prior) using the Kalman filter state propagation + equations. + Parameters + ---------- + u : np.array, default 0 + Optional control vector. + B : np.array(dim_x, dim_u), or None + Optional control transition matrix; a value of None + will cause the filter to use `self.B`. + F : np.array(dim_x, dim_x), or None + Optional state transition matrix; a value of None + will cause the filter to use `self.F`. + Q : np.array(dim_x, dim_x), scalar, or None + Optional process noise matrix; a value of None will cause the + filter to use `self.Q`. + """ + + if B is None: + B = self.B + if F is None: + F = self.F + if Q is None: + Q = self.Q + elif isscalar(Q): + Q = eye(self.dim_x) * Q + + + # x = Fx + Bu + if B is not None and u is not None: + self.x = dot(F, self.x) + dot(B, u) + else: + self.x = dot(F, self.x) + + # P = FPF' + Q + self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q + + # save prior + self.x_prior = self.x.copy() + self.P_prior = self.P.copy() + + + + def freeze(self): + """ + Save the parameters before non-observation forward + """ + self.attr_saved = deepcopy(self.__dict__) + + + def unfreeze(self): + if self.attr_saved is not None: + new_history = deepcopy(self.history_obs) + self.__dict__ = self.attr_saved + # self.history_obs = new_history + self.history_obs = self.history_obs[:-1] + occur = [int(d is None) for d in new_history] + indices = np.where(np.array(occur)==0)[0] + index1 = indices[-2] + index2 = indices[-1] + box1 = new_history[index1] + x1, y1, s1, r1 = box1 + w1 = np.sqrt(s1 * r1) + h1 = np.sqrt(s1 / r1) + box2 = new_history[index2] + x2, y2, s2, r2 = box2 + w2 = np.sqrt(s2 * r2) + h2 = np.sqrt(s2 / r2) + time_gap = index2 - index1 + dx = (x2-x1)/time_gap + dy = (y2-y1)/time_gap + dw = (w2-w1)/time_gap + dh = (h2-h1)/time_gap + for i in range(index2 - index1): + """ + The default virtual trajectory generation is by linear + motion (constant speed hypothesis), you could modify this + part to implement your own. + """ + x = x1 + (i+1) * dx + y = y1 + (i+1) * dy + w = w1 + (i+1) * dw + h = h1 + (i+1) * dh + s = w * h + r = w / float(h) + new_box = np.array([x, y, s, r]).reshape((4, 1)) + """ + I still use predict-update loop here to refresh the parameters, + but this can be faster by directly modifying the internal parameters + as suggested in the paper. I keep this naive but slow way for + easy read and understanding + """ + self.update(new_box) + if not i == (index2-index1-1): + self.predict() + + + def update(self, z, R=None, H=None): + """ + Add a new measurement (z) to the Kalman filter. 
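        In this OC-SORT variant the call also drives the online smoothing logic:
        the first None measurement after a run of observations freezes the current
        parameters (see freeze() above), and the first real measurement after the
        gap triggers unfreeze(), which replays a linearly interpolated virtual
        trajectory over the missed steps before the normal update is applied.
        A rough calling pattern, with z0 and z2 standing in for hypothetical
        [x, y, s, r] measurements:

            kf.predict(); kf.update(z0)     # observed frame
            kf.predict(); kf.update(None)   # missed frame  -> freeze()
            kf.predict(); kf.update(z2)     # re-observed   -> unfreeze(), then update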
+ If z is None, nothing is computed. However, x_post and P_post are + updated with the prior (x_prior, P_prior), and self.z is set to None. + Parameters + ---------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + If you pass in a value of H, z must be a column vector the + of the correct size. + R : np.array, scalar, or None + Optionally provide R to override the measurement noise for this + one call, otherwise self.R will be used. + H : np.array, or None + Optionally provide H to override the measurement function for this + one call, otherwise self.H will be used. + """ + + # set to None to force recompute + self._log_likelihood = None + self._likelihood = None + self._mahalanobis = None + + # append the observation + self.history_obs.append(z) + + if z is None: + if self.observed: + """ + Got no observation so freeze the current parameters for future + potential online smoothing. + """ + self.freeze() + self.observed = False + self.z = np.array([[None]*self.dim_z]).T + self.x_post = self.x.copy() + self.P_post = self.P.copy() + self.y = zeros((self.dim_z, 1)) + return + + # self.observed = True + if not self.observed: + """ + Get observation, use online smoothing to re-update parameters + """ + self.unfreeze() + self.observed = True + + if R is None: + R = self.R + elif isscalar(R): + R = eye(self.dim_z) * R + + if H is None: + z = reshape_z(z, self.dim_z, self.x.ndim) + H = self.H + + # y = z - Hx + # error (residual) between measurement and prediction + self.y = z - dot(H, self.x) + + # common subexpression for speed + PHT = dot(self.P, H.T) + + # S = HPH' + R + # project system uncertainty into measurement space + self.S = dot(H, PHT) + R + self.SI = self.inv(self.S) + # K = PH'inv(S) + # map system uncertainty into kalman gain + self.K = dot(PHT, self.SI) + + # x = x + Ky + # predict new x with residual scaled by the kalman gain + self.x = self.x + dot(self.K, self.y) + + # P = (I-KH)P(I-KH)' + KRK' + # This is more numerically stable + # and works for non-optimal K vs the equation + # P = (I-KH)P usually seen in the literature. + + I_KH = self._I - dot(self.K, H) + self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T) + + # save measurement and posterior state + self.z = deepcopy(z) + self.x_post = self.x.copy() + self.P_post = self.P.copy() + + def predict_steadystate(self, u=0, B=None): + """ + Predict state (prior) using the Kalman filter state propagation + equations. Only x is updated, P is left unchanged. See + update_steadstate() for a longer explanation of when to use this + method. + Parameters + ---------- + u : np.array + Optional control vector. If non-zero, it is multiplied by B + to create the control input into the system. + B : np.array(dim_x, dim_u), or None + Optional control transition matrix; a value of None + will cause the filter to use `self.B`. + """ + + if B is None: + B = self.B + + # x = Fx + Bu + if B is not None: + self.x = dot(self.F, self.x) + dot(B, u) + else: + self.x = dot(self.F, self.x) + + # save prior + self.x_prior = self.x.copy() + self.P_prior = self.P.copy() + + def update_steadystate(self, z): + """ + Add a new measurement (z) to the Kalman filter without recomputing + the Kalman gain K, the state covariance P, or the system + uncertainty S. + You can use this for LTI systems since the Kalman gain and covariance + converge to a fixed value. 
Precompute these and assign them explicitly, + or run the Kalman filter using the normal predict()/update(0 cycle + until they converge. + The main advantage of this call is speed. We do significantly less + computation, notably avoiding a costly matrix inversion. + Use in conjunction with predict_steadystate(), otherwise P will grow + without bound. + Parameters + ---------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + Examples + -------- + >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter + >>> # let filter converge on representative data, then save k and P + >>> for i in range(100): + >>> cv.predict() + >>> cv.update([i, i, i]) + >>> saved_k = np.copy(cv.K) + >>> saved_P = np.copy(cv.P) + later on: + >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter + >>> cv.K = np.copy(saved_K) + >>> cv.P = np.copy(saved_P) + >>> for i in range(100): + >>> cv.predict_steadystate() + >>> cv.update_steadystate([i, i, i]) + """ + + # set to None to force recompute + self._log_likelihood = None + self._likelihood = None + self._mahalanobis = None + + if z is None: + self.z = np.array([[None]*self.dim_z]).T + self.x_post = self.x.copy() + self.P_post = self.P.copy() + self.y = zeros((self.dim_z, 1)) + return + + z = reshape_z(z, self.dim_z, self.x.ndim) + + # y = z - Hx + # error (residual) between measurement and prediction + self.y = z - dot(self.H, self.x) + + # x = x + Ky + # predict new x with residual scaled by the kalman gain + self.x = self.x + dot(self.K, self.y) + + self.z = deepcopy(z) + self.x_post = self.x.copy() + self.P_post = self.P.copy() + + # set to None to force recompute + self._log_likelihood = None + self._likelihood = None + self._mahalanobis = None + + def update_correlated(self, z, R=None, H=None): + """ Add a new measurement (z) to the Kalman filter assuming that + process noise and measurement noise are correlated as defined in + the `self.M` matrix. + A partial derivation can be found in [1] + If z is None, nothing is changed. + Parameters + ---------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + R : np.array, scalar, or None + Optionally provide R to override the measurement noise for this + one call, otherwise self.R will be used. + H : np.array, or None + Optionally provide H to override the measurement function for this + one call, otherwise self.H will be used. + References + ---------- + .. [1] Bulut, Y. (2011). Applied Kalman filter theory (Doctoral dissertation, Northeastern University). + http://people.duke.edu/~hpgavin/SystemID/References/Balut-KalmanFilter-PhD-NEU-2011.pdf + """ + + # set to None to force recompute + self._log_likelihood = None + self._likelihood = None + self._mahalanobis = None + + if z is None: + self.z = np.array([[None]*self.dim_z]).T + self.x_post = self.x.copy() + self.P_post = self.P.copy() + self.y = zeros((self.dim_z, 1)) + return + + if R is None: + R = self.R + elif isscalar(R): + R = eye(self.dim_z) * R + + # rename for readability and a tiny extra bit of speed + if H is None: + z = reshape_z(z, self.dim_z, self.x.ndim) + H = self.H + + # handle special case: if z is in form [[z]] but x is not a column + # vector dimensions will not match + if self.x.ndim == 1 and shape(z) == (1, 1): + z = z[0] + + if shape(z) == (): # is it scalar, e.g. 
z=3 or z=np.array(3) + z = np.asarray([z]) + + # y = z - Hx + # error (residual) between measurement and prediction + self.y = z - dot(H, self.x) + + # common subexpression for speed + PHT = dot(self.P, H.T) + + # project system uncertainty into measurement space + self.S = dot(H, PHT) + dot(H, self.M) + dot(self.M.T, H.T) + R + self.SI = self.inv(self.S) + + # K = PH'inv(S) + # map system uncertainty into kalman gain + self.K = dot(PHT + self.M, self.SI) + + # x = x + Ky + # predict new x with residual scaled by the kalman gain + self.x = self.x + dot(self.K, self.y) + self.P = self.P - dot(self.K, dot(H, self.P) + self.M.T) + + self.z = deepcopy(z) + self.x_post = self.x.copy() + self.P_post = self.P.copy() + + def batch_filter(self, zs, Fs=None, Qs=None, Hs=None, + Rs=None, Bs=None, us=None, update_first=False, + saver=None): + """ Batch processes a sequences of measurements. + Parameters + ---------- + zs : list-like + list of measurements at each time step `self.dt`. Missing + measurements must be represented by `None`. + Fs : None, list-like, default=None + optional value or list of values to use for the state transition + matrix F. + If Fs is None then self.F is used for all epochs. + Otherwise it must contain a list-like list of F's, one for + each epoch. This allows you to have varying F per epoch. + Qs : None, np.array or list-like, default=None + optional value or list of values to use for the process error + covariance Q. + If Qs is None then self.Q is used for all epochs. + Otherwise it must contain a list-like list of Q's, one for + each epoch. This allows you to have varying Q per epoch. + Hs : None, np.array or list-like, default=None + optional list of values to use for the measurement matrix H. + If Hs is None then self.H is used for all epochs. + If Hs contains a single matrix, then it is used as H for all + epochs. + Otherwise it must contain a list-like list of H's, one for + each epoch. This allows you to have varying H per epoch. + Rs : None, np.array or list-like, default=None + optional list of values to use for the measurement error + covariance R. + If Rs is None then self.R is used for all epochs. + Otherwise it must contain a list-like list of R's, one for + each epoch. This allows you to have varying R per epoch. + Bs : None, np.array or list-like, default=None + optional list of values to use for the control transition matrix B. + If Bs is None then self.B is used for all epochs. + Otherwise it must contain a list-like list of B's, one for + each epoch. This allows you to have varying B per epoch. + us : None, np.array or list-like, default=None + optional list of values to use for the control input vector; + If us is None then None is used for all epochs (equivalent to 0, + or no control input). + Otherwise it must contain a list-like list of u's, one for + each epoch. + update_first : bool, optional, default=False + controls whether the order of operations is update followed by + predict, or predict followed by update. Default is predict->update. + saver : filterpy.common.Saver, optional + filterpy.common.Saver object. If provided, saver.save() will be + called after every epoch + Returns + ------- + means : np.array((n,dim_x,1)) + array of the state for each time step after the update. Each entry + is an np.array. In other words `means[k,:]` is the state at step + `k`. + covariance : np.array((n,dim_x,dim_x)) + array of the covariances for each time step after the update. + In other words `covariance[k,:,:]` is the covariance at step `k`. 
+ means_predictions : np.array((n,dim_x,1)) + array of the state for each time step after the predictions. Each + entry is an np.array. In other words `means[k,:]` is the state at + step `k`. + covariance_predictions : np.array((n,dim_x,dim_x)) + array of the covariances for each time step after the prediction. + In other words `covariance[k,:,:]` is the covariance at step `k`. + Examples + -------- + .. code-block:: Python + # this example demonstrates tracking a measurement where the time + # between measurement varies, as stored in dts. This requires + # that F be recomputed for each epoch. The output is then smoothed + # with an RTS smoother. + zs = [t + random.randn()*4 for t in range (40)] + Fs = [np.array([[1., dt], [0, 1]] for dt in dts] + (mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs) + (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs) + """ + + #pylint: disable=too-many-statements + n = np.size(zs, 0) + if Fs is None: + Fs = [self.F] * n + if Qs is None: + Qs = [self.Q] * n + if Hs is None: + Hs = [self.H] * n + if Rs is None: + Rs = [self.R] * n + if Bs is None: + Bs = [self.B] * n + if us is None: + us = [0] * n + + # mean estimates from Kalman Filter + if self.x.ndim == 1: + means = zeros((n, self.dim_x)) + means_p = zeros((n, self.dim_x)) + else: + means = zeros((n, self.dim_x, 1)) + means_p = zeros((n, self.dim_x, 1)) + + # state covariances from Kalman Filter + covariances = zeros((n, self.dim_x, self.dim_x)) + covariances_p = zeros((n, self.dim_x, self.dim_x)) + + if update_first: + for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): + + self.update(z, R=R, H=H) + means[i, :] = self.x + covariances[i, :, :] = self.P + + self.predict(u=u, B=B, F=F, Q=Q) + means_p[i, :] = self.x + covariances_p[i, :, :] = self.P + + if saver is not None: + saver.save() + else: + for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): + + self.predict(u=u, B=B, F=F, Q=Q) + means_p[i, :] = self.x + covariances_p[i, :, :] = self.P + + self.update(z, R=R, H=H) + means[i, :] = self.x + covariances[i, :, :] = self.P + + if saver is not None: + saver.save() + + return (means, covariances, means_p, covariances_p) + + def rts_smoother(self, Xs, Ps, Fs=None, Qs=None, inv=np.linalg.inv): + """ + Runs the Rauch-Tung-Striebel Kalman smoother on a set of + means and covariances computed by a Kalman filter. The usual input + would come from the output of `KalmanFilter.batch_filter()`. + Parameters + ---------- + Xs : numpy.array + array of the means (state variable x) of the output of a Kalman + filter. + Ps : numpy.array + array of the covariances of the output of a kalman filter. + Fs : list-like collection of numpy.array, optional + State transition matrix of the Kalman filter at each time step. + Optional, if not provided the filter's self.F will be used + Qs : list-like collection of numpy.array, optional + Process noise of the Kalman filter at each time step. Optional, + if not provided the filter's self.Q will be used + inv : function, default numpy.linalg.inv + If you prefer another inverse function, such as the Moore-Penrose + pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv + Returns + ------- + x : numpy.ndarray + smoothed means + P : numpy.ndarray + smoothed state covariances + K : numpy.ndarray + smoother gain at each step + Pp : numpy.ndarray + Predicted state covariances + Examples + -------- + .. 
code-block:: Python + zs = [t + random.randn()*4 for t in range (40)] + (mu, cov, _, _) = kalman.batch_filter(zs) + (x, P, K, Pp) = rts_smoother(mu, cov, kf.F, kf.Q) + """ + + if len(Xs) != len(Ps): + raise ValueError('length of Xs and Ps must be the same') + + n = Xs.shape[0] + dim_x = Xs.shape[1] + + if Fs is None: + Fs = [self.F] * n + if Qs is None: + Qs = [self.Q] * n + + # smoother gain + K = zeros((n, dim_x, dim_x)) + + x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy() + for k in range(n-2, -1, -1): + Pp[k] = dot(dot(Fs[k+1], P[k]), Fs[k+1].T) + Qs[k+1] + + #pylint: disable=bad-whitespace + K[k] = dot(dot(P[k], Fs[k+1].T), inv(Pp[k])) + x[k] += dot(K[k], x[k+1] - dot(Fs[k+1], x[k])) + P[k] += dot(dot(K[k], P[k+1] - Pp[k]), K[k].T) + + return (x, P, K, Pp) + + def get_prediction(self, u=None, B=None, F=None, Q=None): + """ + Predict next state (prior) using the Kalman filter state propagation + equations and returns it without modifying the object. + Parameters + ---------- + u : np.array, default 0 + Optional control vector. + B : np.array(dim_x, dim_u), or None + Optional control transition matrix; a value of None + will cause the filter to use `self.B`. + F : np.array(dim_x, dim_x), or None + Optional state transition matrix; a value of None + will cause the filter to use `self.F`. + Q : np.array(dim_x, dim_x), scalar, or None + Optional process noise matrix; a value of None will cause the + filter to use `self.Q`. + Returns + ------- + (x, P) : tuple + State vector and covariance array of the prediction. + """ + + if B is None: + B = self.B + if F is None: + F = self.F + if Q is None: + Q = self.Q + elif isscalar(Q): + Q = eye(self.dim_x) * Q + + # x = Fx + Bu + if B is not None and u is not None: + x = dot(F, self.x) + dot(B, u) + else: + x = dot(F, self.x) + + # P = FPF' + Q + P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q + + return x, P + + def get_update(self, z=None): + """ + Computes the new estimate based on measurement `z` and returns it + without altering the state of the filter. + Parameters + ---------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + Returns + ------- + (x, P) : tuple + State vector and covariance array of the update. + """ + + if z is None: + return self.x, self.P + z = reshape_z(z, self.dim_z, self.x.ndim) + + R = self.R + H = self.H + P = self.P + x = self.x + + # error (residual) between measurement and prediction + y = z - dot(H, x) + + # common subexpression for speed + PHT = dot(P, H.T) + + # project system uncertainty into measurement space + S = dot(H, PHT) + R + + # map system uncertainty into kalman gain + K = dot(PHT, self.inv(S)) + + # predict new x with residual scaled by the kalman gain + x = x + dot(K, y) + + # P = (I-KH)P(I-KH)' + KRK' + I_KH = self._I - dot(K, H) + P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) + + return x, P + + def residual_of(self, z): + """ + Returns the residual for the given measurement (z). Does not alter + the state of the filter. + """ + z = reshape_z(z, self.dim_z, self.x.ndim) + return z - dot(self.H, self.x_prior) + + def measurement_of_state(self, x): + """ + Helper function that converts a state into a measurement. + Parameters + ---------- + x : np.array + kalman state vector + Returns + ------- + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. 
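get_prediction() and get_update() answer "what would the next predict/update produce?" without committing the result, which is useful for gating or look-ahead logic. A minimal sketch with illustrative model values, written against filterpy's KalmanFilter, which exposes the same methods:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import KalmanFilter

    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.F = np.array([[1., 1.], [0., 1.]])   # constant-velocity model (illustrative)
    kf.H = np.array([[1., 0.]])             # observe position only
    kf.P *= 100.
    kf.R *= 5.

    x_prior, P_prior = kf.get_prediction()              # what predict() would give, kf untouched
    x_post, P_post = kf.get_update(np.array([[2.4]]))   # what update(z=2.4) would give, kf untouched
    y = kf.residual_of(np.array([[2.4]]))                # innovation relative to x_prior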
+ """ + + return dot(self.H, x) + + @property + def log_likelihood(self): + """ + log-likelihood of the last measurement. + """ + if self._log_likelihood is None: + self._log_likelihood = logpdf(x=self.y, cov=self.S) + return self._log_likelihood + + @property + def likelihood(self): + """ + Computed from the log-likelihood. The log-likelihood can be very + small, meaning a large negative value such as -28000. Taking the + exp() of that results in 0.0, which can break typical algorithms + which multiply by this value, so by default we always return a + number >= sys.float_info.min. + """ + if self._likelihood is None: + self._likelihood = exp(self.log_likelihood) + if self._likelihood == 0: + self._likelihood = sys.float_info.min + return self._likelihood + + @property + def mahalanobis(self): + """" + Mahalanobis distance of measurement. E.g. 3 means measurement + was 3 standard deviations away from the predicted value. + Returns + ------- + mahalanobis : float + """ + if self._mahalanobis is None: + self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y))) + return self._mahalanobis + + @property + def alpha(self): + """ + Fading memory setting. 1.0 gives the normal Kalman filter, and + values slightly larger than 1.0 (such as 1.02) give a fading + memory effect - previous measurements have less influence on the + filter's estimates. This formulation of the Fading memory filter + (there are many) is due to Dan Simon [1]_. + """ + return self._alpha_sq**.5 + + def log_likelihood_of(self, z): + """ + log likelihood of the measurement `z`. This should only be called + after a call to update(). Calling after predict() will yield an + incorrect result.""" + + if z is None: + return log(sys.float_info.min) + return logpdf(z, dot(self.H, self.x), self.S) + + @alpha.setter + def alpha(self, value): + if not np.isscalar(value) or value < 1: + raise ValueError('alpha must be a float greater than 1') + + self._alpha_sq = value**2 + + def __repr__(self): + return '\n'.join([ + 'KalmanFilter object', + pretty_str('dim_x', self.dim_x), + pretty_str('dim_z', self.dim_z), + pretty_str('dim_u', self.dim_u), + pretty_str('x', self.x), + pretty_str('P', self.P), + pretty_str('x_prior', self.x_prior), + pretty_str('P_prior', self.P_prior), + pretty_str('x_post', self.x_post), + pretty_str('P_post', self.P_post), + pretty_str('F', self.F), + pretty_str('Q', self.Q), + pretty_str('R', self.R), + pretty_str('H', self.H), + pretty_str('K', self.K), + pretty_str('y', self.y), + pretty_str('S', self.S), + pretty_str('SI', self.SI), + pretty_str('M', self.M), + pretty_str('B', self.B), + pretty_str('z', self.z), + pretty_str('log-likelihood', self.log_likelihood), + pretty_str('likelihood', self.likelihood), + pretty_str('mahalanobis', self.mahalanobis), + pretty_str('alpha', self.alpha), + pretty_str('inv', self.inv) + ]) + + def test_matrix_dimensions(self, z=None, H=None, R=None, F=None, Q=None): + """ + Performs a series of asserts to check that the size of everything + is what it should be. This can help you debug problems in your design. + If you pass in H, R, F, Q those will be used instead of this object's + value for those matrices. + Testing `z` (the measurement) is problamatic. x is a vector, and can be + implemented as either a 1D array or as a nx1 column vector. Thus Hx + can be of different shapes. Then, if Hx is a single value, it can + be either a 1D array or 2D vector. 
If either is true, z can reasonably + be a scalar (either '3' or np.array('3') are scalars under this + definition), a 1D, 1 element array, or a 2D, 1 element array. You are + allowed to pass in any combination that works. + """ + + if H is None: + H = self.H + if R is None: + R = self.R + if F is None: + F = self.F + if Q is None: + Q = self.Q + x = self.x + P = self.P + + assert x.ndim == 1 or x.ndim == 2, \ + "x must have one or two dimensions, but has {}".format(x.ndim) + + if x.ndim == 1: + assert x.shape[0] == self.dim_x, \ + "Shape of x must be ({},{}), but is {}".format( + self.dim_x, 1, x.shape) + else: + assert x.shape == (self.dim_x, 1), \ + "Shape of x must be ({},{}), but is {}".format( + self.dim_x, 1, x.shape) + + assert P.shape == (self.dim_x, self.dim_x), \ + "Shape of P must be ({},{}), but is {}".format( + self.dim_x, self.dim_x, P.shape) + + assert Q.shape == (self.dim_x, self.dim_x), \ + "Shape of Q must be ({},{}), but is {}".format( + self.dim_x, self.dim_x, P.shape) + + assert F.shape == (self.dim_x, self.dim_x), \ + "Shape of F must be ({},{}), but is {}".format( + self.dim_x, self.dim_x, F.shape) + + assert np.ndim(H) == 2, \ + "Shape of H must be (dim_z, {}), but is {}".format( + P.shape[0], shape(H)) + + assert H.shape[1] == P.shape[0], \ + "Shape of H must be (dim_z, {}), but is {}".format( + P.shape[0], H.shape) + + # shape of R must be the same as HPH' + hph_shape = (H.shape[0], H.shape[0]) + r_shape = shape(R) + + if H.shape[0] == 1: + # r can be scalar, 1D, or 2D in this case + assert r_shape in [(), (1,), (1, 1)], \ + "R must be scalar or one element array, but is shaped {}".format( + r_shape) + else: + assert r_shape == hph_shape, \ + "shape of R should be {} but it is {}".format(hph_shape, r_shape) + + + if z is not None: + z_shape = shape(z) + else: + z_shape = (self.dim_z, 1) + + # H@x must have shape of z + Hx = dot(H, x) + + if z_shape == (): # scalar or np.array(scalar) + assert Hx.ndim == 1 or shape(Hx) == (1, 1), \ + "shape of z should be {}, not {} for the given H".format( + shape(Hx), z_shape) + + elif shape(Hx) == (1,): + assert z_shape[0] == 1, 'Shape of z must be {} for the given H'.format(shape(Hx)) + + else: + assert (z_shape == shape(Hx) or + (len(z_shape) == 1 and shape(Hx) == (z_shape[0], 1))), \ + "shape of z should be {}, not {} for the given H".format( + shape(Hx), z_shape) + + if np.ndim(Hx) > 1 and shape(Hx) != (1, 1): + assert shape(Hx) == z_shape, \ + 'shape of z should be {} for the given H, but it is {}'.format( + shape(Hx), z_shape) + + +def update(x, P, z, R, H=None, return_all=False): + """ + Add a new measurement (z) to the Kalman filter. If z is None, nothing + is changed. + This can handle either the multidimensional or unidimensional case. If + all parameters are floats instead of arrays the filter will still work, + and return floats for x, P as the result. + update(1, 2, 1, 1, 1) # univariate + update(x, P, 1 + Parameters + ---------- + x : numpy.array(dim_x, 1), or float + State estimate vector + P : numpy.array(dim_x, dim_x), or float + Covariance matrix + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + R : numpy.array(dim_z, dim_z), or float + Measurement noise matrix + H : numpy.array(dim_x, dim_x), or float, optional + Measurement function. If not provided, a value of 1 is assumed. + return_all : bool, default False + If true, y, K, S, and log_likelihood are returned, otherwise + only x and P are returned. 
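The module-level predict()/update() pair below runs the same cycle as the class, but on plain arrays. A minimal sketch of that cycle for a 1-D constant-position model; the noise values are illustrative, and the functions are shown via filterpy.kalman, which exports the same signatures:

.. code-block:: Python

    import numpy as np
    from filterpy.kalman import predict, update

    x = np.array([0.])           # state estimate
    P = np.array([[500.]])       # state covariance (large initial uncertainty)
    F = np.array([[1.]])         # static state transition
    H = np.array([[1.]])         # measure the state directly
    Q = np.array([[0.01]])       # assumed process noise
    R = np.array([[5.]])         # assumed measurement noise

    for z in (1.1, 0.9, 1.05):   # toy measurements
        x, P = predict(x, P, F=F, Q=Q)
        x, P = update(x, P, z, R, H)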
+ Returns + ------- + x : numpy.array + Posterior state estimate vector + P : numpy.array + Posterior covariance matrix + y : numpy.array or scalar + Residua. Difference between measurement and state in measurement space + K : numpy.array + Kalman gain + S : numpy.array + System uncertainty in measurement space + log_likelihood : float + log likelihood of the measurement + """ + + #pylint: disable=bare-except + + if z is None: + if return_all: + return x, P, None, None, None, None + return x, P + + if H is None: + H = np.array([1]) + + if np.isscalar(H): + H = np.array([H]) + + Hx = np.atleast_1d(dot(H, x)) + z = reshape_z(z, Hx.shape[0], x.ndim) + + # error (residual) between measurement and prediction + y = z - Hx + + # project system uncertainty into measurement space + S = dot(dot(H, P), H.T) + R + + + # map system uncertainty into kalman gain + try: + K = dot(dot(P, H.T), linalg.inv(S)) + except: + # can't invert a 1D array, annoyingly + K = dot(dot(P, H.T), 1./S) + + + # predict new x with residual scaled by the kalman gain + x = x + dot(K, y) + + # P = (I-KH)P(I-KH)' + KRK' + KH = dot(K, H) + + try: + I_KH = np.eye(KH.shape[0]) - KH + except: + I_KH = np.array([1 - KH]) + P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) + + + if return_all: + # compute log likelihood + log_likelihood = logpdf(z, dot(H, x), S) + return x, P, y, K, S, log_likelihood + return x, P + + +def update_steadystate(x, z, K, H=None): + """ + Add a new measurement (z) to the Kalman filter. If z is None, nothing + is changed. + Parameters + ---------- + x : numpy.array(dim_x, 1), or float + State estimate vector + z : (dim_z, 1): array_like + measurement for this update. z can be a scalar if dim_z is 1, + otherwise it must be convertible to a column vector. + K : numpy.array, or float + Kalman gain matrix + H : numpy.array(dim_x, dim_x), or float, optional + Measurement function. If not provided, a value of 1 is assumed. + Returns + ------- + x : numpy.array + Posterior state estimate vector + Examples + -------- + This can handle either the multidimensional or unidimensional case. If + all parameters are floats instead of arrays the filter will still work, + and return floats for x, P as the result. + >>> update_steadystate(1, 2, 1) # univariate + >>> update_steadystate(x, P, z, H) + """ + + + if z is None: + return x + + if H is None: + H = np.array([1]) + + if np.isscalar(H): + H = np.array([H]) + + Hx = np.atleast_1d(dot(H, x)) + z = reshape_z(z, Hx.shape[0], x.ndim) + + # error (residual) between measurement and prediction + y = z - Hx + + # estimate new x with residual scaled by the kalman gain + return x + dot(K, y) + + +def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.): + """ + Predict next state (prior) using the Kalman filter state propagation + equations. + Parameters + ---------- + x : numpy.array + State estimate vector + P : numpy.array + Covariance matrix + F : numpy.array() + State Transition matrix + Q : numpy.array, Optional + Process noise matrix + u : numpy.array, Optional, default 0. + Control vector. If non-zero, it is multiplied by B + to create the control input into the system. + B : numpy.array, optional, default 0. + Control transition matrix. + alpha : float, Optional, default=1.0 + Fading memory setting. 1.0 gives the normal Kalman filter, and + values slightly larger than 1.0 (such as 1.02) give a fading + memory effect - previous measurements have less influence on the + filter's estimates. 
This formulation of the Fading memory filter + (there are many) is due to Dan Simon + Returns + ------- + x : numpy.array + Prior state estimate vector + P : numpy.array + Prior covariance matrix + """ + + if np.isscalar(F): + F = np.array(F) + x = dot(F, x) + dot(B, u) + P = (alpha * alpha) * dot(dot(F, P), F.T) + Q + + return x, P + + +def predict_steadystate(x, F=1, u=0, B=1): + """ + Predict next state (prior) using the Kalman filter state propagation + equations. This steady state form only computes x, assuming that the + covariance is constant. + Parameters + ---------- + x : numpy.array + State estimate vector + P : numpy.array + Covariance matrix + F : numpy.array() + State Transition matrix + u : numpy.array, Optional, default 0. + Control vector. If non-zero, it is multiplied by B + to create the control input into the system. + B : numpy.array, optional, default 0. + Control transition matrix. + Returns + ------- + x : numpy.array + Prior state estimate vector + """ + + if np.isscalar(F): + F = np.array(F) + x = dot(F, x) + dot(B, u) + + return x + + + +def batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None, + update_first=False, saver=None): + """ + Batch processes a sequences of measurements. + Parameters + ---------- + zs : list-like + list of measurements at each time step. Missing measurements must be + represented by None. + Fs : list-like + list of values to use for the state transition matrix matrix. + Qs : list-like + list of values to use for the process error + covariance. + Hs : list-like + list of values to use for the measurement matrix. + Rs : list-like + list of values to use for the measurement error + covariance. + Bs : list-like, optional + list of values to use for the control transition matrix; + a value of None in any position will cause the filter + to use `self.B` for that time step. + us : list-like, optional + list of values to use for the control input vector; + a value of None in any position will cause the filter to use + 0 for that time step. + update_first : bool, optional + controls whether the order of operations is update followed by + predict, or predict followed by update. Default is predict->update. + saver : filterpy.common.Saver, optional + filterpy.common.Saver object. If provided, saver.save() will be + called after every epoch + Returns + ------- + means : np.array((n,dim_x,1)) + array of the state for each time step after the update. Each entry + is an np.array. In other words `means[k,:]` is the state at step + `k`. + covariance : np.array((n,dim_x,dim_x)) + array of the covariances for each time step after the update. + In other words `covariance[k,:,:]` is the covariance at step `k`. + means_predictions : np.array((n,dim_x,1)) + array of the state for each time step after the predictions. Each + entry is an np.array. In other words `means[k,:]` is the state at + step `k`. + covariance_predictions : np.array((n,dim_x,dim_x)) + array of the covariances for each time step after the prediction. + In other words `covariance[k,:,:]` is the covariance at step `k`. + Examples + -------- + .. 
code-block:: Python + zs = [t + random.randn()*4 for t in range (40)] + Fs = [kf.F for t in range (40)] + Hs = [kf.H for t in range (40)] + (mu, cov, _, _) = kf.batch_filter(zs, Rs=R_list, Fs=Fs, Hs=Hs, Qs=None, + Bs=None, us=None, update_first=False) + (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs, Qs=None) + """ + + n = np.size(zs, 0) + dim_x = x.shape[0] + + # mean estimates from Kalman Filter + if x.ndim == 1: + means = zeros((n, dim_x)) + means_p = zeros((n, dim_x)) + else: + means = zeros((n, dim_x, 1)) + means_p = zeros((n, dim_x, 1)) + + # state covariances from Kalman Filter + covariances = zeros((n, dim_x, dim_x)) + covariances_p = zeros((n, dim_x, dim_x)) + + if us is None: + us = [0.] * n + Bs = [0.] * n + + if update_first: + for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): + + x, P = update(x, P, z, R=R, H=H) + means[i, :] = x + covariances[i, :, :] = P + + x, P = predict(x, P, u=u, B=B, F=F, Q=Q) + means_p[i, :] = x + covariances_p[i, :, :] = P + if saver is not None: + saver.save() + else: + for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): + + x, P = predict(x, P, u=u, B=B, F=F, Q=Q) + means_p[i, :] = x + covariances_p[i, :, :] = P + + x, P = update(x, P, z, R=R, H=H) + means[i, :] = x + covariances[i, :, :] = P + if saver is not None: + saver.save() + + return (means, covariances, means_p, covariances_p) + + + +def rts_smoother(Xs, Ps, Fs, Qs): + """ + Runs the Rauch-Tung-Striebel Kalman smoother on a set of + means and covariances computed by a Kalman filter. The usual input + would come from the output of `KalmanFilter.batch_filter()`. + Parameters + ---------- + Xs : numpy.array + array of the means (state variable x) of the output of a Kalman + filter. + Ps : numpy.array + array of the covariances of the output of a kalman filter. + Fs : list-like collection of numpy.array + State transition matrix of the Kalman filter at each time step. + Qs : list-like collection of numpy.array, optional + Process noise of the Kalman filter at each time step. + Returns + ------- + x : numpy.ndarray + smoothed means + P : numpy.ndarray + smoothed state covariances + K : numpy.ndarray + smoother gain at each step + pP : numpy.ndarray + predicted state covariances + Examples + -------- + .. 
code-block:: Python + zs = [t + random.randn()*4 for t in range (40)] + (mu, cov, _, _) = kalman.batch_filter(zs) + (x, P, K, pP) = rts_smoother(mu, cov, kf.F, kf.Q) + """ + + if len(Xs) != len(Ps): + raise ValueError('length of Xs and Ps must be the same') + + n = Xs.shape[0] + dim_x = Xs.shape[1] + + # smoother gain + K = zeros((n, dim_x, dim_x)) + x, P, pP = Xs.copy(), Ps.copy(), Ps.copy() + + for k in range(n-2, -1, -1): + pP[k] = dot(dot(Fs[k], P[k]), Fs[k].T) + Qs[k] + + #pylint: disable=bad-whitespace + K[k] = dot(dot(P[k], Fs[k].T), linalg.inv(pP[k])) + x[k] += dot(K[k], x[k+1] - dot(Fs[k], x[k])) + P[k] += dot(dot(K[k], P[k+1] - pP[k]), K[k].T) + + return (x, P, K, pP) \ No newline at end of file diff --git a/feeder/trackers/ocsort/ocsort.py b/feeder/trackers/ocsort/ocsort.py new file mode 100644 index 0000000..f4eddf0 --- /dev/null +++ b/feeder/trackers/ocsort/ocsort.py @@ -0,0 +1,328 @@ +""" + This script is adopted from the SORT script by Alex Bewley alex@bewley.ai +""" +from __future__ import print_function + +import numpy as np +from .association import * +from ultralytics.yolo.utils.ops import xywh2xyxy + + +def k_previous_obs(observations, cur_age, k): + if len(observations) == 0: + return [-1, -1, -1, -1, -1] + for i in range(k): + dt = k - i + if cur_age - dt in observations: + return observations[cur_age-dt] + max_age = max(observations.keys()) + return observations[max_age] + + +def convert_bbox_to_z(bbox): + """ + Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form + [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is + the aspect ratio + """ + w = bbox[2] - bbox[0] + h = bbox[3] - bbox[1] + x = bbox[0] + w/2. + y = bbox[1] + h/2. + s = w * h # scale is just area + r = w / float(h+1e-6) + return np.array([x, y, s, r]).reshape((4, 1)) + + +def convert_x_to_bbox(x, score=None): + """ + Takes a bounding box in the centre form [x,y,s,r] and returns it in the form + [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right + """ + w = np.sqrt(x[2] * x[3]) + h = x[2] / w + if(score == None): + return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2.]).reshape((1, 4)) + else: + return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2., score]).reshape((1, 5)) + + +def speed_direction(bbox1, bbox2): + cx1, cy1 = (bbox1[0]+bbox1[2]) / 2.0, (bbox1[1]+bbox1[3])/2.0 + cx2, cy2 = (bbox2[0]+bbox2[2]) / 2.0, (bbox2[1]+bbox2[3])/2.0 + speed = np.array([cy2-cy1, cx2-cx1]) + norm = np.sqrt((cy2-cy1)**2 + (cx2-cx1)**2) + 1e-6 + return speed / norm + + +class KalmanBoxTracker(object): + """ + This class represents the internal state of individual tracked objects observed as bbox. + """ + count = 0 + + def __init__(self, bbox, cls, delta_t=3, orig=False): + """ + Initialises a tracker using initial bounding box. + + """ + # define constant velocity model + if not orig: + from .kalmanfilter import KalmanFilterNew as KalmanFilter + self.kf = KalmanFilter(dim_x=7, dim_z=4) + else: + from filterpy.kalman import KalmanFilter + self.kf = KalmanFilter(dim_x=7, dim_z=4) + self.kf.F = np.array([[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1], [ + 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]]) + self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]]) + + self.kf.R[2:, 2:] *= 10. + self.kf.P[4:, 4:] *= 1000. # give high uncertainty to the unobservable initial velocities + self.kf.P *= 10. 
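        # The 7-dimensional state is [u, v, s, r, du, dv, ds]:
        # (u, v) is the box centre, s its area and r its aspect ratio, with the
        # last three entries their rates of change; r is modelled as constant.
        # Only [u, v, s, r] are observed (see H above), so the velocity block of P
        # is inflated to reflect that the initial velocities are unknown.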
+ self.kf.Q[-1, -1] *= 0.01 + self.kf.Q[4:, 4:] *= 0.01 + + self.kf.x[:4] = convert_bbox_to_z(bbox) + self.time_since_update = 0 + self.id = KalmanBoxTracker.count + KalmanBoxTracker.count += 1 + self.history = [] + self.hits = 0 + self.hit_streak = 0 + self.age = 0 + self.conf = bbox[-1] + self.cls = cls + """ + NOTE: [-1,-1,-1,-1,-1] is a compromising placeholder for non-observation status, the same for the return of + function k_previous_obs. It is ugly and I do not like it. But to support generate observation array in a + fast and unified way, which you would see below k_observations = np.array([k_previous_obs(...]]), let's bear it for now. + """ + self.last_observation = np.array([-1, -1, -1, -1, -1]) # placeholder + self.observations = dict() + self.history_observations = [] + self.velocity = None + self.delta_t = delta_t + + def update(self, bbox, cls): + """ + Updates the state vector with observed bbox. + """ + + if bbox is not None: + self.conf = bbox[-1] + self.cls = cls + if self.last_observation.sum() >= 0: # no previous observation + previous_box = None + for i in range(self.delta_t): + dt = self.delta_t - i + if self.age - dt in self.observations: + previous_box = self.observations[self.age-dt] + break + if previous_box is None: + previous_box = self.last_observation + """ + Estimate the track speed direction with observations \Delta t steps away + """ + self.velocity = speed_direction(previous_box, bbox) + + """ + Insert new observations. This is a ugly way to maintain both self.observations + and self.history_observations. Bear it for the moment. + """ + self.last_observation = bbox + self.observations[self.age] = bbox + self.history_observations.append(bbox) + + self.time_since_update = 0 + self.history = [] + self.hits += 1 + self.hit_streak += 1 + self.kf.update(convert_bbox_to_z(bbox)) + else: + self.kf.update(bbox) + + def predict(self): + """ + Advances the state vector and returns the predicted bounding box estimate. + """ + if((self.kf.x[6]+self.kf.x[2]) <= 0): + self.kf.x[6] *= 0.0 + + self.kf.predict() + self.age += 1 + if(self.time_since_update > 0): + self.hit_streak = 0 + self.time_since_update += 1 + self.history.append(convert_x_to_bbox(self.kf.x)) + return self.history[-1] + + def get_state(self): + """ + Returns the current bounding box estimate. + """ + return convert_x_to_bbox(self.kf.x) + + +""" + We support multiple ways for association cost calculation, by default + we use IoU. GIoU may have better performance in some situations. We note + that we hardly normalize the cost by all methods to (0,1) which may not be + the best practice. +""" +ASSO_FUNCS = { "iou": iou_batch, + "giou": giou_batch, + "ciou": ciou_batch, + "diou": diou_batch, + "ct_dist": ct_dist} + + +class OCSort(object): + def __init__(self, det_thresh, max_age=30, min_hits=3, + iou_threshold=0.3, delta_t=3, asso_func="iou", inertia=0.2, use_byte=False): + """ + Sets key parameters for SORT + """ + self.max_age = max_age + self.min_hits = min_hits + self.iou_threshold = iou_threshold + self.trackers = [] + self.frame_count = 0 + self.det_thresh = det_thresh + self.delta_t = delta_t + self.asso_func = ASSO_FUNCS[asso_func] + self.inertia = inertia + self.use_byte = use_byte + KalmanBoxTracker.count = 0 + + def update(self, dets, _): + """ + Params: + dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...] + Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections). 
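A sketch of the expected per-frame driver loop. The detection layout ([x1, y1, x2, y2, score, class]) follows the slicing below, and a torch tensor is assumed because the method calls .numpy() on its columns; the import path mirrors this diff, and the association helpers are assumed to behave as in upstream OC-SORT:

.. code-block:: Python

    import torch
    from trackers.ocsort.ocsort import OCSort   # path as laid out in this diff

    tracker = OCSort(det_thresh=0.3)            # threshold is illustrative

    # Toy detections for one frame: x1, y1, x2, y2, score, class
    dets = torch.tensor([[ 10., 20.,  50.,  80., 0.9, 0.],
                         [200., 40., 260., 120., 0.8, 2.]])
    tracks = tracker.update(dets, None)

Each returned row is [x1, y1, x2, y2, track_id, class, confidence], and update() must be called once per frame, even when nothing was detected, so that track ages keep advancing.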
+ Returns the a similar array, where the last column is the object ID. + NOTE: The number of objects returned may differ from the number of detections provided. + """ + + self.frame_count += 1 + + xyxys = dets[:, 0:4] + confs = dets[:, 4] + clss = dets[:, 5] + + classes = clss.numpy() + xyxys = xyxys.numpy() + confs = confs.numpy() + + output_results = np.column_stack((xyxys, confs, classes)) + + inds_low = confs > 0.1 + inds_high = confs < self.det_thresh + inds_second = np.logical_and(inds_low, inds_high) # self.det_thresh > score > 0.1, for second matching + dets_second = output_results[inds_second] # detections for second matching + remain_inds = confs > self.det_thresh + dets = output_results[remain_inds] + + # get predicted locations from existing trackers. + trks = np.zeros((len(self.trackers), 5)) + to_del = [] + ret = [] + for t, trk in enumerate(trks): + pos = self.trackers[t].predict()[0] + trk[:] = [pos[0], pos[1], pos[2], pos[3], 0] + if np.any(np.isnan(pos)): + to_del.append(t) + trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) + for t in reversed(to_del): + self.trackers.pop(t) + + velocities = np.array( + [trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers]) + last_boxes = np.array([trk.last_observation for trk in self.trackers]) + k_observations = np.array( + [k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers]) + + """ + First round of association + """ + matched, unmatched_dets, unmatched_trks = associate( + dets, trks, self.iou_threshold, velocities, k_observations, self.inertia) + for m in matched: + self.trackers[m[1]].update(dets[m[0], :5], dets[m[0], 5]) + + """ + Second round of associaton by OCR + """ + # BYTE association + if self.use_byte and len(dets_second) > 0 and unmatched_trks.shape[0] > 0: + u_trks = trks[unmatched_trks] + iou_left = self.asso_func(dets_second, u_trks) # iou between low score detections and unmatched tracks + iou_left = np.array(iou_left) + if iou_left.max() > self.iou_threshold: + """ + NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may + get a higher performance especially on MOT17/MOT20 datasets. But we keep it + uniform here for simplicity + """ + matched_indices = linear_assignment(-iou_left) + to_remove_trk_indices = [] + for m in matched_indices: + det_ind, trk_ind = m[0], unmatched_trks[m[1]] + if iou_left[m[0], m[1]] < self.iou_threshold: + continue + self.trackers[trk_ind].update(dets_second[det_ind, :5], dets_second[det_ind, 5]) + to_remove_trk_indices.append(trk_ind) + unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices)) + + if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0: + left_dets = dets[unmatched_dets] + left_trks = last_boxes[unmatched_trks] + iou_left = self.asso_func(left_dets, left_trks) + iou_left = np.array(iou_left) + if iou_left.max() > self.iou_threshold: + """ + NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may + get a higher performance especially on MOT17/MOT20 datasets. 
But we keep it + uniform here for simplicity + """ + rematched_indices = linear_assignment(-iou_left) + to_remove_det_indices = [] + to_remove_trk_indices = [] + for m in rematched_indices: + det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]] + if iou_left[m[0], m[1]] < self.iou_threshold: + continue + self.trackers[trk_ind].update(dets[det_ind, :5], dets[det_ind, 5]) + to_remove_det_indices.append(det_ind) + to_remove_trk_indices.append(trk_ind) + unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices)) + unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices)) + + for m in unmatched_trks: + self.trackers[m].update(None, None) + + # create and initialise new trackers for unmatched detections + for i in unmatched_dets: + trk = KalmanBoxTracker(dets[i, :5], dets[i, 5], delta_t=self.delta_t) + self.trackers.append(trk) + i = len(self.trackers) + for trk in reversed(self.trackers): + if trk.last_observation.sum() < 0: + d = trk.get_state()[0] + else: + """ + this is optional to use the recent observation or the kalman filter prediction, + we didn't notice significant difference here + """ + d = trk.last_observation[:4] + if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits): + # +1 as MOT benchmark requires positive + ret.append(np.concatenate((d, [trk.id+1], [trk.cls], [trk.conf])).reshape(1, -1)) + i -= 1 + # remove dead tracklet + if(trk.time_since_update > self.max_age): + self.trackers.pop(i) + if(len(ret) > 0): + return np.concatenate(ret) + return np.empty((0, 5)) diff --git a/feeder/trackers/reid_export.py b/feeder/trackers/reid_export.py new file mode 100644 index 0000000..9ef8d13 --- /dev/null +++ b/feeder/trackers/reid_export.py @@ -0,0 +1,313 @@ +import argparse + +import os +# limit the number of cpus used by high performance libraries +os.environ["OMP_NUM_THREADS"] = "1" +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" +os.environ["VECLIB_MAXIMUM_THREADS"] = "1" +os.environ["NUMEXPR_NUM_THREADS"] = "1" + +import sys +import numpy as np +from pathlib import Path +import torch +import time +import platform +import pandas as pd +import subprocess +import torch.backends.cudnn as cudnn +from torch.utils.mobile_optimizer import optimize_for_mobile + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0].parents[0] # yolov5 strongsort root directory +WEIGHTS = ROOT / 'weights' + + +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +if str(ROOT / 'yolov5') not in sys.path: + sys.path.append(str(ROOT / 'yolov5')) # add yolov5 ROOT to PATH + +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import logging +from ultralytics.yolo.utils.torch_utils import select_device +from ultralytics.yolo.utils import LOGGER, colorstr, ops +from ultralytics.yolo.utils.checks import check_requirements, check_version +from trackers.strongsort.deep.models import build_model +from trackers.strongsort.deep.reid_model_factory import get_model_name, load_pretrained_weights + + +def file_size(path): + # Return file/dir size (MB) + path = Path(path) + if path.is_file(): + return path.stat().st_size / 1E6 + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 + else: + return 0.0 + + +def export_formats(): + # YOLOv5 export formats + x = [ + ['PyTorch', '-', '.pt', True, True], + ['TorchScript', 'torchscript', '.torchscript', True, True], + ['ONNX', 'onnx', '.onnx', True, True], + ['OpenVINO', 
'openvino', '_openvino_model', True, False], + ['TensorRT', 'engine', '.engine', False, True], + ['TensorFlow Lite', 'tflite', '.tflite', True, False], + ] + return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) + + +def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): + # YOLOv5 TorchScript model export + try: + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') + f = file.with_suffix('.torchscript') + + ts = torch.jit.trace(model, im, strict=False) + if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f)) + else: + ts.save(str(f)) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'{prefix} export failure: {e}') + + +def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): + # ONNX export + try: + check_requirements(('onnx',)) + import onnx + + f = file.with_suffix('.onnx') + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') + + if dynamic: + dynamic = {'images': {0: 'batch'}} # shape(1,3,640,640) + dynamic['output'] = {0: 'batch'} # shape(1,25200,85) + + torch.onnx.export( + model.cpu() if dynamic else model, # --dynamic only compatible with cpu + im.cpu() if dynamic else im, + f, + verbose=False, + opset_version=opset, + do_constant_folding=True, + input_names=['images'], + output_names=['output'], + dynamic_axes=dynamic or None + ) + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + onnx.save(model_onnx, f) + + # Simplify + if simplify: + try: + cuda = torch.cuda.is_available() + check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) + import onnxsim + + LOGGER.info(f'simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + LOGGER.info(f'simplifier failure: {e}') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'export failure: {e}') + + + +def export_openvino(file, half, prefix=colorstr('OpenVINO:')): + # YOLOv5 OpenVINO export + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + try: + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', f'_openvino_model{os.sep}') + + cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" + subprocess.check_output(cmd.split()) # export + except Exception as e: + LOGGER.info(f'export failure: {e}') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + + +def export_tflite(file, half, prefix=colorstr('TFLite:')): + # YOLOv5 OpenVINO export + try: + check_requirements(('openvino2tensorflow', 'tensorflow', 'tensorflow_datasets')) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + output = Path(str(file).replace(f'_openvino_model{os.sep}', f'_tflite_model{os.sep}')) + modelxml = list(Path(file).glob('*.xml'))[0] + cmd = f"openvino2tensorflow \ + --model_path 
{modelxml} \ + --model_output_path {output} \ + --output_pb \ + --output_saved_model \ + --output_no_quant_float32_tflite \ + --output_dynamic_range_quant_tflite" + subprocess.check_output(cmd.split()) # export + + LOGGER.info(f'{prefix} export success, results saved in {output} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + +def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt + try: + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' + try: + import tensorrt as trt + except Exception: + if platform.system() == 'Linux': + check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) + import tensorrt as trt + + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + grid = model.model[-1].anchor_grid + model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 + model.model[-1].anchor_grid = grid + else: # TensorRT >= 8 + check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 + export_onnx(model, im, file, 12, dynamic, simplify) # opset 13 + onnx = file.with_suffix('.onnx') + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' + f = file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + LOGGER.info(f'{prefix} Network Description:') + for inp in inputs: + LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + + if dynamic: + if im.shape[0] <= 1: + LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") + profile = builder.create_optimization_profile() + for inp in inputs: + profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) + config.add_optimization_profile(profile) + + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') + if builder.platform_has_fast_fp16 and half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser(description="ReID 
export") + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[256, 128], help='image (h, w)') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') + parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') + parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') + parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') + parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') + parser.add_argument('--weights', nargs='+', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt', help='model.pt path(s)') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--include', + nargs='+', + default=['torchscript'], + help='torchscript, onnx, openvino, engine') + args = parser.parse_args() + + t = time.time() + + include = [x.lower() for x in args.include] # to lowercase + fmts = tuple(export_formats()['Argument'][1:]) # --include arguments + flags = [x in include for x in fmts] + assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' + jit, onnx, openvino, engine, tflite = flags # export booleans + + args.device = select_device(args.device) + if args.half: + assert args.device.type != 'cpu', '--half only compatible with GPU export, i.e. use --device 0' + assert not args.dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' + + if type(args.weights) is list: + args.weights = Path(args.weights[0]) + + model = build_model( + get_model_name(args.weights), + num_classes=1, + pretrained=not (args.weights and args.weights.is_file() and args.weights.suffix == '.pt'), + use_gpu=args.device + ).to(args.device) + load_pretrained_weights(model, args.weights) + model.eval() + + if args.optimize: + assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' + + im = torch.zeros(args.batch_size, 3, args.imgsz[0], args.imgsz[1]).to(args.device) # image size(1,3,640,480) BCHW iDetection + for _ in range(2): + y = model(im) # dry runs + if args.half: + im, model = im.half(), model.half() # to FP16 + shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape + LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {args.weights} with output shape {shape} ({file_size(args.weights):.1f} MB)") + + # Exports + f = [''] * len(fmts) # exported filenames + if jit: + f[0] = export_torchscript(model, im, args.weights, args.optimize) # opset 12 + if engine: # TensorRT required before ONNX + f[1] = export_engine(model, im, args.weights, args.half, args.dynamic, args.simplify, args.workspace, args.verbose) + if onnx: # OpenVINO requires ONNX + f[2] = export_onnx(model, im, args.weights, args.opset, args.dynamic, args.simplify) # opset 12 + if openvino: + f[3] = export_openvino(args.weights, args.half) + if tflite: + export_tflite(f, False) + + # Finish + f = [str(x) for x in f if x] # filter out '' and None + if any(f): + LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' + f"\nResults saved to {colorstr('bold', args.weights.parent.resolve())}" + f"\nVisualize: https://netron.app") + diff --git a/feeder/trackers/strongsort/__init__.py b/feeder/trackers/strongsort/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/feeder/trackers/strongsort/configs/strongsort.yaml b/feeder/trackers/strongsort/configs/strongsort.yaml new file mode 100644 index 0000000..c4fa8b6 --- /dev/null +++ b/feeder/trackers/strongsort/configs/strongsort.yaml @@ -0,0 +1,11 @@ +strongsort: + ecc: true + ema_alpha: 0.8962157769329083 + max_age: 40 + max_dist: 0.1594374041012136 + max_iou_dist: 0.5431835667667874 + max_unmatched_preds: 0 + mc_lambda: 0.995 + n_init: 3 + nn_budget: 100 + conf_thres: 0.5122620708221085 diff --git a/feeder/trackers/strongsort/deep/checkpoint/.gitkeep b/feeder/trackers/strongsort/deep/checkpoint/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/feeder/trackers/strongsort/deep/checkpoint/osnet_x0_25_market1501.pth b/feeder/trackers/strongsort/deep/checkpoint/osnet_x0_25_market1501.pth new file mode 100644 index 0000000..7fffc34 Binary files /dev/null and b/feeder/trackers/strongsort/deep/checkpoint/osnet_x0_25_market1501.pth differ diff --git a/feeder/trackers/strongsort/deep/checkpoint/osnet_x0_25_msmt17.pth b/feeder/trackers/strongsort/deep/checkpoint/osnet_x0_25_msmt17.pth new file mode 100644 index 0000000..f80a348 Binary files /dev/null and b/feeder/trackers/strongsort/deep/checkpoint/osnet_x0_25_msmt17.pth differ diff --git a/feeder/trackers/strongsort/deep/checkpoint/osnet_x1_0_msmt17.pth b/feeder/trackers/strongsort/deep/checkpoint/osnet_x1_0_msmt17.pth new file mode 100644 index 0000000..078ad76 Binary files /dev/null and b/feeder/trackers/strongsort/deep/checkpoint/osnet_x1_0_msmt17.pth differ diff --git a/feeder/trackers/strongsort/deep/models/__init__.py b/feeder/trackers/strongsort/deep/models/__init__.py new file mode 100644 index 0000000..3c60ba6 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/__init__.py @@ -0,0 +1,122 @@ +from __future__ import absolute_import +import torch + +from .pcb import * +from .mlfn import * +from .hacnn import * +from .osnet import * +from .senet import * +from .mudeep import * +from .nasnet import * +from .resnet import * +from .densenet import * +from .xception import * +from .osnet_ain import * +from .resnetmid import * +from 
.shufflenet import * +from .squeezenet import * +from .inceptionv4 import * +from .mobilenetv2 import * +from .resnet_ibn_a import * +from .resnet_ibn_b import * +from .shufflenetv2 import * +from .inceptionresnetv2 import * + +__model_factory = { + # image classification models + 'resnet18': resnet18, + 'resnet34': resnet34, + 'resnet50': resnet50, + 'resnet101': resnet101, + 'resnet152': resnet152, + 'resnext50_32x4d': resnext50_32x4d, + 'resnext101_32x8d': resnext101_32x8d, + 'resnet50_fc512': resnet50_fc512, + 'se_resnet50': se_resnet50, + 'se_resnet50_fc512': se_resnet50_fc512, + 'se_resnet101': se_resnet101, + 'se_resnext50_32x4d': se_resnext50_32x4d, + 'se_resnext101_32x4d': se_resnext101_32x4d, + 'densenet121': densenet121, + 'densenet169': densenet169, + 'densenet201': densenet201, + 'densenet161': densenet161, + 'densenet121_fc512': densenet121_fc512, + 'inceptionresnetv2': inceptionresnetv2, + 'inceptionv4': inceptionv4, + 'xception': xception, + 'resnet50_ibn_a': resnet50_ibn_a, + 'resnet50_ibn_b': resnet50_ibn_b, + # lightweight models + 'nasnsetmobile': nasnetamobile, + 'mobilenetv2_x1_0': mobilenetv2_x1_0, + 'mobilenetv2_x1_4': mobilenetv2_x1_4, + 'shufflenet': shufflenet, + 'squeezenet1_0': squeezenet1_0, + 'squeezenet1_0_fc512': squeezenet1_0_fc512, + 'squeezenet1_1': squeezenet1_1, + 'shufflenet_v2_x0_5': shufflenet_v2_x0_5, + 'shufflenet_v2_x1_0': shufflenet_v2_x1_0, + 'shufflenet_v2_x1_5': shufflenet_v2_x1_5, + 'shufflenet_v2_x2_0': shufflenet_v2_x2_0, + # reid-specific models + 'mudeep': MuDeep, + 'resnet50mid': resnet50mid, + 'hacnn': HACNN, + 'pcb_p6': pcb_p6, + 'pcb_p4': pcb_p4, + 'mlfn': mlfn, + 'osnet_x1_0': osnet_x1_0, + 'osnet_x0_75': osnet_x0_75, + 'osnet_x0_5': osnet_x0_5, + 'osnet_x0_25': osnet_x0_25, + 'osnet_ibn_x1_0': osnet_ibn_x1_0, + 'osnet_ain_x1_0': osnet_ain_x1_0, + 'osnet_ain_x0_75': osnet_ain_x0_75, + 'osnet_ain_x0_5': osnet_ain_x0_5, + 'osnet_ain_x0_25': osnet_ain_x0_25 +} + + +def show_avai_models(): + """Displays available models. + + Examples:: + >>> from torchreid import models + >>> models.show_avai_models() + """ + print(list(__model_factory.keys())) + + +def build_model( + name, num_classes, loss='softmax', pretrained=True, use_gpu=True +): + """A function wrapper for building a model. + + Args: + name (str): model name. + num_classes (int): number of training identities. + loss (str, optional): loss function to optimize the model. Currently + supports "softmax" and "triplet". Default is "softmax". + pretrained (bool, optional): whether to load ImageNet-pretrained weights. + Default is True. + use_gpu (bool, optional): whether to use gpu. Default is True. + + Returns: + nn.Module + + Examples:: + >>> from torchreid import models + >>> model = models.build_model('resnet50', 751, loss='softmax') + """ + avai_models = list(__model_factory.keys()) + if name not in avai_models: + raise KeyError( + 'Unknown model: {}. 
Must be one of {}'.format(name, avai_models) + ) + return __model_factory[name]( + num_classes=num_classes, + loss=loss, + pretrained=pretrained, + use_gpu=use_gpu + ) diff --git a/feeder/trackers/strongsort/deep/models/densenet.py b/feeder/trackers/strongsort/deep/models/densenet.py new file mode 100644 index 0000000..a1d9b7e --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/densenet.py @@ -0,0 +1,380 @@ +""" +Code source: https://github.com/pytorch/vision +""" +from __future__ import division, absolute_import +import re +from collections import OrderedDict +import torch +import torch.nn as nn +from torch.nn import functional as F +from torch.utils import model_zoo + +__all__ = [ + 'densenet121', 'densenet169', 'densenet201', 'densenet161', + 'densenet121_fc512' +] + +model_urls = { + 'densenet121': + 'https://download.pytorch.org/models/densenet121-a639ec97.pth', + 'densenet169': + 'https://download.pytorch.org/models/densenet169-b2777c0a.pth', + 'densenet201': + 'https://download.pytorch.org/models/densenet201-c1103571.pth', + 'densenet161': + 'https://download.pytorch.org/models/densenet161-8d451a50.pth', +} + + +class _DenseLayer(nn.Sequential): + + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): + super(_DenseLayer, self).__init__() + self.add_module('norm1', nn.BatchNorm2d(num_input_features)), + self.add_module('relu1', nn.ReLU(inplace=True)), + self.add_module( + 'conv1', + nn.Conv2d( + num_input_features, + bn_size * growth_rate, + kernel_size=1, + stride=1, + bias=False + ) + ), + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(inplace=True)), + self.add_module( + 'conv2', + nn.Conv2d( + bn_size * growth_rate, + growth_rate, + kernel_size=3, + stride=1, + padding=1, + bias=False + ) + ), + self.drop_rate = drop_rate + + def forward(self, x): + new_features = super(_DenseLayer, self).forward(x) + if self.drop_rate > 0: + new_features = F.dropout( + new_features, p=self.drop_rate, training=self.training + ) + return torch.cat([x, new_features], 1) + + +class _DenseBlock(nn.Sequential): + + def __init__( + self, num_layers, num_input_features, bn_size, growth_rate, drop_rate + ): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer( + num_input_features + i*growth_rate, growth_rate, bn_size, + drop_rate + ) + self.add_module('denselayer%d' % (i+1), layer) + + +class _Transition(nn.Sequential): + + def __init__(self, num_input_features, num_output_features): + super(_Transition, self).__init__() + self.add_module('norm', nn.BatchNorm2d(num_input_features)) + self.add_module('relu', nn.ReLU(inplace=True)) + self.add_module( + 'conv', + nn.Conv2d( + num_input_features, + num_output_features, + kernel_size=1, + stride=1, + bias=False + ) + ) + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + """Densely connected network. + + Reference: + Huang et al. Densely Connected Convolutional Networks. CVPR 2017. + + Public keys: + - ``densenet121``: DenseNet121. + - ``densenet169``: DenseNet169. + - ``densenet201``: DenseNet201. + - ``densenet161``: DenseNet161. + - ``densenet121_fc512``: DenseNet121 + FC. 
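The build_model() factory in models/__init__.py above is the intended entry point for all of these backbones. A hedged sketch of constructing a DenseNet121 for embedding extraction; the identity count and input size are illustrative, not taken from this repository:

.. code-block:: Python

    import torch
    from trackers.strongsort.deep.models import build_model   # same import reid_export.py uses

    model = build_model('densenet121', num_classes=751, loss='softmax',
                        pretrained=False)   # set pretrained=True to pull ImageNet weights
    model.eval()

    dummy = torch.zeros(1, 3, 256, 128)     # (batch, channels, height, width)
    with torch.no_grad():
        features = model(dummy)             # eval-mode forward() returns the pooled embedding
    print(features.shape)                   # torch.Size([1, 1024]) for densenet121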
+ """ + + def __init__( + self, + num_classes, + loss, + growth_rate=32, + block_config=(6, 12, 24, 16), + num_init_features=64, + bn_size=4, + drop_rate=0, + fc_dims=None, + dropout_p=None, + **kwargs + ): + + super(DenseNet, self).__init__() + self.loss = loss + + # First convolution + self.features = nn.Sequential( + OrderedDict( + [ + ( + 'conv0', + nn.Conv2d( + 3, + num_init_features, + kernel_size=7, + stride=2, + padding=3, + bias=False + ) + ), + ('norm0', nn.BatchNorm2d(num_init_features)), + ('relu0', nn.ReLU(inplace=True)), + ( + 'pool0', + nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + ), + ] + ) + ) + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + drop_rate=drop_rate + ) + self.features.add_module('denseblock%d' % (i+1), block) + num_features = num_features + num_layers*growth_rate + if i != len(block_config) - 1: + trans = _Transition( + num_input_features=num_features, + num_output_features=num_features // 2 + ) + self.features.add_module('transition%d' % (i+1), trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', nn.BatchNorm2d(num_features)) + + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.feature_dim = num_features + self.fc = self._construct_fc_layer(fc_dims, num_features, dropout_p) + + # Linear layer + self.classifier = nn.Linear(self.feature_dim, num_classes) + + self._init_params() + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """Constructs fully connected layer. + + Args: + fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed + input_dim (int): input dimension + dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + f = self.features(x) + f = F.relu(f, inplace=True) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if self.fc is not None: + v = self.fc(v) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. 
+ """ + pretrain_dict = model_zoo.load_url(model_url) + + # '.'s are no longer allowed in module names, but pervious _DenseLayer + # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. + # They are also in the checkpoints in model_urls. This pattern is used + # to find such keys. + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$' + ) + for key in list(pretrain_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + pretrain_dict[new_key] = pretrain_dict[key] + del pretrain_dict[key] + + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +""" +Dense network configurations: +-- +densenet121: num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16) +densenet169: num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32) +densenet201: num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32) +densenet161: num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24) +""" + + +def densenet121(num_classes, loss='softmax', pretrained=True, **kwargs): + model = DenseNet( + num_classes=num_classes, + loss=loss, + num_init_features=64, + growth_rate=32, + block_config=(6, 12, 24, 16), + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['densenet121']) + return model + + +def densenet169(num_classes, loss='softmax', pretrained=True, **kwargs): + model = DenseNet( + num_classes=num_classes, + loss=loss, + num_init_features=64, + growth_rate=32, + block_config=(6, 12, 32, 32), + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['densenet169']) + return model + + +def densenet201(num_classes, loss='softmax', pretrained=True, **kwargs): + model = DenseNet( + num_classes=num_classes, + loss=loss, + num_init_features=64, + growth_rate=32, + block_config=(6, 12, 48, 32), + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['densenet201']) + return model + + +def densenet161(num_classes, loss='softmax', pretrained=True, **kwargs): + model = DenseNet( + num_classes=num_classes, + loss=loss, + num_init_features=96, + growth_rate=48, + block_config=(6, 12, 36, 24), + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['densenet161']) + return model + + +def densenet121_fc512(num_classes, loss='softmax', pretrained=True, **kwargs): + model = DenseNet( + num_classes=num_classes, + loss=loss, + num_init_features=64, + growth_rate=32, + block_config=(6, 12, 24, 16), + fc_dims=[512], + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['densenet121']) + return model diff --git a/feeder/trackers/strongsort/deep/models/hacnn.py b/feeder/trackers/strongsort/deep/models/hacnn.py new file mode 100644 index 0000000..f21cc82 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/hacnn.py @@ -0,0 +1,414 @@ +from __future__ import division, absolute_import +import torch +from torch import nn +from torch.nn import functional as F + +__all__ = ['HACNN'] + + +class ConvBlock(nn.Module): + """Basic convolutional block. + + convolution + batch normalization + relu. + + Args: + in_c (int): number of input channels. 
+ out_c (int): number of output channels. + k (int or tuple): kernel size. + s (int or tuple): stride. + p (int or tuple): padding. + """ + + def __init__(self, in_c, out_c, k, s=1, p=0): + super(ConvBlock, self).__init__() + self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p) + self.bn = nn.BatchNorm2d(out_c) + + def forward(self, x): + return F.relu(self.bn(self.conv(x))) + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, out_channels): + super(InceptionA, self).__init__() + mid_channels = out_channels // 4 + + self.stream1 = nn.Sequential( + ConvBlock(in_channels, mid_channels, 1), + ConvBlock(mid_channels, mid_channels, 3, p=1), + ) + self.stream2 = nn.Sequential( + ConvBlock(in_channels, mid_channels, 1), + ConvBlock(mid_channels, mid_channels, 3, p=1), + ) + self.stream3 = nn.Sequential( + ConvBlock(in_channels, mid_channels, 1), + ConvBlock(mid_channels, mid_channels, 3, p=1), + ) + self.stream4 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1), + ConvBlock(in_channels, mid_channels, 1), + ) + + def forward(self, x): + s1 = self.stream1(x) + s2 = self.stream2(x) + s3 = self.stream3(x) + s4 = self.stream4(x) + y = torch.cat([s1, s2, s3, s4], dim=1) + return y + + +class InceptionB(nn.Module): + + def __init__(self, in_channels, out_channels): + super(InceptionB, self).__init__() + mid_channels = out_channels // 4 + + self.stream1 = nn.Sequential( + ConvBlock(in_channels, mid_channels, 1), + ConvBlock(mid_channels, mid_channels, 3, s=2, p=1), + ) + self.stream2 = nn.Sequential( + ConvBlock(in_channels, mid_channels, 1), + ConvBlock(mid_channels, mid_channels, 3, p=1), + ConvBlock(mid_channels, mid_channels, 3, s=2, p=1), + ) + self.stream3 = nn.Sequential( + nn.MaxPool2d(3, stride=2, padding=1), + ConvBlock(in_channels, mid_channels * 2, 1), + ) + + def forward(self, x): + s1 = self.stream1(x) + s2 = self.stream2(x) + s3 = self.stream3(x) + y = torch.cat([s1, s2, s3], dim=1) + return y + + +class SpatialAttn(nn.Module): + """Spatial Attention (Sec. 3.1.I.1)""" + + def __init__(self): + super(SpatialAttn, self).__init__() + self.conv1 = ConvBlock(1, 1, 3, s=2, p=1) + self.conv2 = ConvBlock(1, 1, 1) + + def forward(self, x): + # global cross-channel averaging + x = x.mean(1, keepdim=True) + # 3-by-3 conv + x = self.conv1(x) + # bilinear resizing + x = F.upsample( + x, (x.size(2) * 2, x.size(3) * 2), + mode='bilinear', + align_corners=True + ) + # scaling conv + x = self.conv2(x) + return x + + +class ChannelAttn(nn.Module): + """Channel Attention (Sec. 3.1.I.2)""" + + def __init__(self, in_channels, reduction_rate=16): + super(ChannelAttn, self).__init__() + assert in_channels % reduction_rate == 0 + self.conv1 = ConvBlock(in_channels, in_channels // reduction_rate, 1) + self.conv2 = ConvBlock(in_channels // reduction_rate, in_channels, 1) + + def forward(self, x): + # squeeze operation (global average pooling) + x = F.avg_pool2d(x, x.size()[2:]) + # excitation operation (2 conv layers) + x = self.conv1(x) + x = self.conv2(x) + return x + + +class SoftAttn(nn.Module): + """Soft Attention (Sec. 3.1.I) + + Aim: Spatial Attention + Channel Attention + + Output: attention maps with shape identical to input. 
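+
+    Editor's note (summary of the forward pass below, not in the original
+    source): the spatial and channel attention maps are multiplied, passed
+    through a 1x1 ConvBlock and squashed with a sigmoid, so the returned
+    attention weights lie in (0, 1).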
+ """ + + def __init__(self, in_channels): + super(SoftAttn, self).__init__() + self.spatial_attn = SpatialAttn() + self.channel_attn = ChannelAttn(in_channels) + self.conv = ConvBlock(in_channels, in_channels, 1) + + def forward(self, x): + y_spatial = self.spatial_attn(x) + y_channel = self.channel_attn(x) + y = y_spatial * y_channel + y = torch.sigmoid(self.conv(y)) + return y + + +class HardAttn(nn.Module): + """Hard Attention (Sec. 3.1.II)""" + + def __init__(self, in_channels): + super(HardAttn, self).__init__() + self.fc = nn.Linear(in_channels, 4 * 2) + self.init_params() + + def init_params(self): + self.fc.weight.data.zero_() + self.fc.bias.data.copy_( + torch.tensor( + [0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float + ) + ) + + def forward(self, x): + # squeeze operation (global average pooling) + x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1)) + # predict transformation parameters + theta = torch.tanh(self.fc(x)) + theta = theta.view(-1, 4, 2) + return theta + + +class HarmAttn(nn.Module): + """Harmonious Attention (Sec. 3.1)""" + + def __init__(self, in_channels): + super(HarmAttn, self).__init__() + self.soft_attn = SoftAttn(in_channels) + self.hard_attn = HardAttn(in_channels) + + def forward(self, x): + y_soft_attn = self.soft_attn(x) + theta = self.hard_attn(x) + return y_soft_attn, theta + + +class HACNN(nn.Module): + """Harmonious Attention Convolutional Neural Network. + + Reference: + Li et al. Harmonious Attention Network for Person Re-identification. CVPR 2018. + + Public keys: + - ``hacnn``: HACNN. + """ + + # Args: + # num_classes (int): number of classes to predict + # nchannels (list): number of channels AFTER concatenation + # feat_dim (int): feature dimension for a single stream + # learn_region (bool): whether to learn region features (i.e. 
local branch) + + def __init__( + self, + num_classes, + loss='softmax', + nchannels=[128, 256, 384], + feat_dim=512, + learn_region=True, + use_gpu=True, + **kwargs + ): + super(HACNN, self).__init__() + self.loss = loss + self.learn_region = learn_region + self.use_gpu = use_gpu + + self.conv = ConvBlock(3, 32, 3, s=2, p=1) + + # Construct Inception + HarmAttn blocks + # ============== Block 1 ============== + self.inception1 = nn.Sequential( + InceptionA(32, nchannels[0]), + InceptionB(nchannels[0], nchannels[0]), + ) + self.ha1 = HarmAttn(nchannels[0]) + + # ============== Block 2 ============== + self.inception2 = nn.Sequential( + InceptionA(nchannels[0], nchannels[1]), + InceptionB(nchannels[1], nchannels[1]), + ) + self.ha2 = HarmAttn(nchannels[1]) + + # ============== Block 3 ============== + self.inception3 = nn.Sequential( + InceptionA(nchannels[1], nchannels[2]), + InceptionB(nchannels[2], nchannels[2]), + ) + self.ha3 = HarmAttn(nchannels[2]) + + self.fc_global = nn.Sequential( + nn.Linear(nchannels[2], feat_dim), + nn.BatchNorm1d(feat_dim), + nn.ReLU(), + ) + self.classifier_global = nn.Linear(feat_dim, num_classes) + + if self.learn_region: + self.init_scale_factors() + self.local_conv1 = InceptionB(32, nchannels[0]) + self.local_conv2 = InceptionB(nchannels[0], nchannels[1]) + self.local_conv3 = InceptionB(nchannels[1], nchannels[2]) + self.fc_local = nn.Sequential( + nn.Linear(nchannels[2] * 4, feat_dim), + nn.BatchNorm1d(feat_dim), + nn.ReLU(), + ) + self.classifier_local = nn.Linear(feat_dim, num_classes) + self.feat_dim = feat_dim * 2 + else: + self.feat_dim = feat_dim + + def init_scale_factors(self): + # initialize scale factors (s_w, s_h) for four regions + self.scale_factors = [] + self.scale_factors.append( + torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float) + ) + self.scale_factors.append( + torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float) + ) + self.scale_factors.append( + torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float) + ) + self.scale_factors.append( + torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float) + ) + + def stn(self, x, theta): + """Performs spatial transform + + x: (batch, channel, height, width) + theta: (batch, 2, 3) + """ + grid = F.affine_grid(theta, x.size()) + x = F.grid_sample(x, grid) + return x + + def transform_theta(self, theta_i, region_idx): + """Transforms theta to include (s_w, s_h), resulting in (batch, 2, 3)""" + scale_factors = self.scale_factors[region_idx] + theta = torch.zeros(theta_i.size(0), 2, 3) + theta[:, :, :2] = scale_factors + theta[:, :, -1] = theta_i + if self.use_gpu: + theta = theta.cuda() + return theta + + def forward(self, x): + assert x.size(2) == 160 and x.size(3) == 64, \ + 'Input size does not match, expected (160, 64) but got ({}, {})'.format(x.size(2), x.size(3)) + x = self.conv(x) + + # ============== Block 1 ============== + # global branch + x1 = self.inception1(x) + x1_attn, x1_theta = self.ha1(x1) + x1_out = x1 * x1_attn + # local branch + if self.learn_region: + x1_local_list = [] + for region_idx in range(4): + x1_theta_i = x1_theta[:, region_idx, :] + x1_theta_i = self.transform_theta(x1_theta_i, region_idx) + x1_trans_i = self.stn(x, x1_theta_i) + x1_trans_i = F.upsample( + x1_trans_i, (24, 28), mode='bilinear', align_corners=True + ) + x1_local_i = self.local_conv1(x1_trans_i) + x1_local_list.append(x1_local_i) + + # ============== Block 2 ============== + # Block 2 + # global branch + x2 = self.inception2(x1_out) + x2_attn, x2_theta = self.ha2(x2) + x2_out = x2 * x2_attn + # local branch 
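+ # Editor's note (descriptive comment, not in the original source): as in
+ # Block 1, each of the four hard-attention regions is cropped from the
+ # previous block's global output via the STN, resized, added to the
+ # corresponding region feature from Block 1, and refined by local_conv2.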
+ if self.learn_region: + x2_local_list = [] + for region_idx in range(4): + x2_theta_i = x2_theta[:, region_idx, :] + x2_theta_i = self.transform_theta(x2_theta_i, region_idx) + x2_trans_i = self.stn(x1_out, x2_theta_i) + x2_trans_i = F.upsample( + x2_trans_i, (12, 14), mode='bilinear', align_corners=True + ) + x2_local_i = x2_trans_i + x1_local_list[region_idx] + x2_local_i = self.local_conv2(x2_local_i) + x2_local_list.append(x2_local_i) + + # ============== Block 3 ============== + # Block 3 + # global branch + x3 = self.inception3(x2_out) + x3_attn, x3_theta = self.ha3(x3) + x3_out = x3 * x3_attn + # local branch + if self.learn_region: + x3_local_list = [] + for region_idx in range(4): + x3_theta_i = x3_theta[:, region_idx, :] + x3_theta_i = self.transform_theta(x3_theta_i, region_idx) + x3_trans_i = self.stn(x2_out, x3_theta_i) + x3_trans_i = F.upsample( + x3_trans_i, (6, 7), mode='bilinear', align_corners=True + ) + x3_local_i = x3_trans_i + x2_local_list[region_idx] + x3_local_i = self.local_conv3(x3_local_i) + x3_local_list.append(x3_local_i) + + # ============== Feature generation ============== + # global branch + x_global = F.avg_pool2d(x3_out, + x3_out.size()[2:] + ).view(x3_out.size(0), x3_out.size(1)) + x_global = self.fc_global(x_global) + # local branch + if self.learn_region: + x_local_list = [] + for region_idx in range(4): + x_local_i = x3_local_list[region_idx] + x_local_i = F.avg_pool2d(x_local_i, + x_local_i.size()[2:] + ).view(x_local_i.size(0), -1) + x_local_list.append(x_local_i) + x_local = torch.cat(x_local_list, 1) + x_local = self.fc_local(x_local) + + if not self.training: + # l2 normalization before concatenation + if self.learn_region: + x_global = x_global / x_global.norm(p=2, dim=1, keepdim=True) + x_local = x_local / x_local.norm(p=2, dim=1, keepdim=True) + return torch.cat([x_global, x_local], 1) + else: + return x_global + + prelogits_global = self.classifier_global(x_global) + if self.learn_region: + prelogits_local = self.classifier_local(x_local) + + if self.loss == 'softmax': + if self.learn_region: + return (prelogits_global, prelogits_local) + else: + return prelogits_global + + elif self.loss == 'triplet': + if self.learn_region: + return (prelogits_global, prelogits_local), (x_global, x_local) + else: + return prelogits_global, x_global + + else: + raise KeyError("Unsupported loss: {}".format(self.loss)) diff --git a/feeder/trackers/strongsort/deep/models/inceptionresnetv2.py b/feeder/trackers/strongsort/deep/models/inceptionresnetv2.py new file mode 100644 index 0000000..03e4034 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/inceptionresnetv2.py @@ -0,0 +1,361 @@ +""" +Code imported from https://github.com/Cadene/pretrained-models.pytorch +""" +from __future__ import division, absolute_import +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['inceptionresnetv2'] + +pretrained_settings = { + 'inceptionresnetv2': { + 'imagenet': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth', + 'input_space': 'RGB', + 'input_size': [3, 299, 299], + 'input_range': [0, 1], + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'num_classes': 1000 + }, + 'imagenet+background': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth', + 'input_space': 'RGB', + 'input_size': [3, 299, 299], + 'input_range': [0, 1], + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'num_classes': 1001 + } + } +} + + +class BasicConv2d(nn.Module): + + 
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, + out_planes, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=False + ) # verify bias false + self.bn = nn.BatchNorm2d( + out_planes, + eps=0.001, # value found in tensorflow + momentum=0.1, # default pytorch value + affine=True + ) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed_5b(nn.Module): + + def __init__(self): + super(Mixed_5b, self).__init__() + + self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(192, 48, kernel_size=1, stride=1), + BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(192, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(192, 64, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block35(nn.Module): + + def __init__(self, scale=1.0): + super(Block35, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1), + BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1) + ) + + self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_6a(nn.Module): + + def __init__(self): + super(Mixed_6a, self).__init__() + + self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Block17(nn.Module): + + def __init__(self, scale=1.0): + super(Block17, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 128, kernel_size=1, stride=1), + BasicConv2d( + 128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3) + ), + BasicConv2d( + 160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0) + ) + ) + + self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_7a(nn.Module): + + def __init__(self): + 
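+ # Editor's note (descriptive comment, not in the original source): this is
+ # the second reduction block of Inception-ResNet-V2; three stride-2 conv
+ # branches plus a stride-2 max-pool shrink the spatial size and grow the
+ # channels from 1088 to 384 + 288 + 320 + 1088 = 2080.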
super(Mixed_7a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1), + BasicConv2d(288, 320, kernel_size=3, stride=2) + ) + + self.branch3 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block8(nn.Module): + + def __init__(self, scale=1.0, noReLU=False): + super(Block8, self).__init__() + + self.scale = scale + self.noReLU = noReLU + + self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(2080, 192, kernel_size=1, stride=1), + BasicConv2d( + 192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1) + ), + BasicConv2d( + 224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) + ) + ) + + self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) + if not self.noReLU: + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + if not self.noReLU: + out = self.relu(out) + return out + + +# ---------------- +# Model Definition +# ---------------- +class InceptionResNetV2(nn.Module): + """Inception-ResNet-V2. + + Reference: + Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual + Connections on Learning. AAAI 2017. + + Public keys: + - ``inceptionresnetv2``: Inception-ResNet-V2. 
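+
+    Editor's note (not in the original source): the backbone ends with a
+    1x1 conv to 1536 channels followed by global average pooling, so the
+    re-id embedding (and the classifier input) is 1536-dimensional.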
+ """ + + def __init__(self, num_classes, loss='softmax', **kwargs): + super(InceptionResNetV2, self).__init__() + self.loss = loss + + # Modules + self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2) + self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1) + self.conv2d_2b = BasicConv2d( + 32, 64, kernel_size=3, stride=1, padding=1 + ) + self.maxpool_3a = nn.MaxPool2d(3, stride=2) + self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1) + self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1) + self.maxpool_5a = nn.MaxPool2d(3, stride=2) + self.mixed_5b = Mixed_5b() + self.repeat = nn.Sequential( + Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), + Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), + Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), + Block35(scale=0.17) + ) + self.mixed_6a = Mixed_6a() + self.repeat_1 = nn.Sequential( + Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), + Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), + Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), + Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), + Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), + Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), + Block17(scale=0.10), Block17(scale=0.10) + ) + self.mixed_7a = Mixed_7a() + self.repeat_2 = nn.Sequential( + Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20), + Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20), + Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20) + ) + + self.block8 = Block8(noReLU=True) + self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.classifier = nn.Linear(1536, num_classes) + + def load_imagenet_weights(self): + settings = pretrained_settings['inceptionresnetv2']['imagenet'] + pretrain_dict = model_zoo.load_url(settings['url']) + model_dict = self.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + self.load_state_dict(model_dict) + + def featuremaps(self, x): + x = self.conv2d_1a(x) + x = self.conv2d_2a(x) + x = self.conv2d_2b(x) + x = self.maxpool_3a(x) + x = self.conv2d_3b(x) + x = self.conv2d_4a(x) + x = self.maxpool_5a(x) + x = self.mixed_5b(x) + x = self.repeat(x) + x = self.mixed_6a(x) + x = self.repeat_1(x) + x = self.mixed_7a(x) + x = self.repeat_2(x) + x = self.block8(x) + x = self.conv2d_7b(x) + return x + + def forward(self, x): + f = self.featuremaps(x) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def inceptionresnetv2(num_classes, loss='softmax', pretrained=True, **kwargs): + model = InceptionResNetV2(num_classes=num_classes, loss=loss, **kwargs) + if pretrained: + model.load_imagenet_weights() + return model diff --git a/feeder/trackers/strongsort/deep/models/inceptionv4.py b/feeder/trackers/strongsort/deep/models/inceptionv4.py new file mode 100644 index 0000000..b14916f --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/inceptionv4.py @@ -0,0 +1,381 @@ +from __future__ import division, absolute_import +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['inceptionv4'] 
+""" +Code imported from https://github.com/Cadene/pretrained-models.pytorch +""" + +pretrained_settings = { + 'inceptionv4': { + 'imagenet': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth', + 'input_space': 'RGB', + 'input_size': [3, 299, 299], + 'input_range': [0, 1], + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'num_classes': 1000 + }, + 'imagenet+background': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth', + 'input_space': 'RGB', + 'input_size': [3, 299, 299], + 'input_range': [0, 1], + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'num_classes': 1001 + } + } +} + + +class BasicConv2d(nn.Module): + + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, + out_planes, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=False + ) # verify bias false + self.bn = nn.BatchNorm2d( + out_planes, + eps=0.001, # value found in tensorflow + momentum=0.1, # default pytorch value + affine=True + ) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed_3a(nn.Module): + + def __init__(self): + super(Mixed_3a, self).__init__() + self.maxpool = nn.MaxPool2d(3, stride=2) + self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2) + + def forward(self, x): + x0 = self.maxpool(x) + x1 = self.conv(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed_4a(nn.Module): + + def __init__(self): + super(Mixed_4a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(64, 96, kernel_size=(3, 3), stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed_5a(nn.Module): + + def __init__(self): + super(Mixed_5a, self).__init__() + self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2) + self.maxpool = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.conv(x) + x1 = self.maxpool(x) + out = torch.cat((x0, x1), 1) + return out + + +class Inception_A(nn.Module): + + def __init__(self): + super(Inception_A, self).__init__() + self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(384, 96, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Reduction_A(nn.Module): + + def __init__(self): + super(Reduction_A, self).__init__() + self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 192, kernel_size=1, stride=1), + 
BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), + BasicConv2d(224, 256, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Inception_B(nn.Module): + + def __init__(self): + super(Inception_B, self).__init__() + self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d( + 192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3) + ), + BasicConv2d( + 224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0) + ) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d( + 192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0) + ), + BasicConv2d( + 192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3) + ), + BasicConv2d( + 224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0) + ), + BasicConv2d( + 224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3) + ) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1024, 128, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Reduction_B(nn.Module): + + def __init__(self): + super(Reduction_B, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 256, kernel_size=1, stride=1), + BasicConv2d( + 256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3) + ), + BasicConv2d( + 256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0) + ), BasicConv2d(320, 320, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Inception_C(nn.Module): + + def __init__(self): + super(Inception_C, self).__init__() + + self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1) + + self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch1_1a = BasicConv2d( + 384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1) + ) + self.branch1_1b = BasicConv2d( + 384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) + ) + + self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch2_1 = BasicConv2d( + 384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0) + ) + self.branch2_2 = BasicConv2d( + 448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1) + ) + self.branch2_3a = BasicConv2d( + 512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1) + ) + self.branch2_3b = BasicConv2d( + 512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1536, 256, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + + x1_0 = self.branch1_0(x) + x1_1a = self.branch1_1a(x1_0) + x1_1b = self.branch1_1b(x1_0) + x1 = torch.cat((x1_1a, x1_1b), 1) + + x2_0 = self.branch2_0(x) + x2_1 = self.branch2_1(x2_0) + x2_2 = self.branch2_2(x2_1) + x2_3a = self.branch2_3a(x2_2) + x2_3b = self.branch2_3b(x2_2) + x2 = torch.cat((x2_3a, x2_3b), 1) + + x3 = self.branch3(x) + + out = 
torch.cat((x0, x1, x2, x3), 1) + return out + + +class InceptionV4(nn.Module): + """Inception-v4. + + Reference: + Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual + Connections on Learning. AAAI 2017. + + Public keys: + - ``inceptionv4``: InceptionV4. + """ + + def __init__(self, num_classes, loss, **kwargs): + super(InceptionV4, self).__init__() + self.loss = loss + + self.features = nn.Sequential( + BasicConv2d(3, 32, kernel_size=3, stride=2), + BasicConv2d(32, 32, kernel_size=3, stride=1), + BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), + Mixed_3a(), + Mixed_4a(), + Mixed_5a(), + Inception_A(), + Inception_A(), + Inception_A(), + Inception_A(), + Reduction_A(), # Mixed_6a + Inception_B(), + Inception_B(), + Inception_B(), + Inception_B(), + Inception_B(), + Inception_B(), + Inception_B(), + Reduction_B(), # Mixed_7a + Inception_C(), + Inception_C(), + Inception_C() + ) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.classifier = nn.Linear(1536, num_classes) + + def forward(self, x): + f = self.features(x) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def inceptionv4(num_classes, loss='softmax', pretrained=True, **kwargs): + model = InceptionV4(num_classes, loss, **kwargs) + if pretrained: + model_url = pretrained_settings['inceptionv4']['imagenet']['url'] + init_pretrained_weights(model, model_url) + return model diff --git a/feeder/trackers/strongsort/deep/models/mlfn.py b/feeder/trackers/strongsort/deep/models/mlfn.py new file mode 100644 index 0000000..ac7e126 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/mlfn.py @@ -0,0 +1,269 @@ +from __future__ import division, absolute_import +import torch +import torch.utils.model_zoo as model_zoo +from torch import nn +from torch.nn import functional as F + +__all__ = ['mlfn'] + +model_urls = { + # training epoch = 5, top1 = 51.6 + 'imagenet': + 'https://mega.nz/#!YHxAhaxC!yu9E6zWl0x5zscSouTdbZu8gdFFytDdl-RAdD2DEfpk', +} + + +class MLFNBlock(nn.Module): + + def __init__( + self, in_channels, out_channels, stride, fsm_channels, groups=32 + ): + super(MLFNBlock, self).__init__() + self.groups = groups + mid_channels = out_channels // 2 + + # Factor Modules + self.fm_conv1 = nn.Conv2d(in_channels, mid_channels, 1, bias=False) + self.fm_bn1 = nn.BatchNorm2d(mid_channels) + self.fm_conv2 = nn.Conv2d( + mid_channels, + mid_channels, + 3, + stride=stride, + padding=1, + bias=False, + groups=self.groups + ) + self.fm_bn2 = nn.BatchNorm2d(mid_channels) + self.fm_conv3 = nn.Conv2d(mid_channels, out_channels, 1, bias=False) + self.fm_bn3 = nn.BatchNorm2d(out_channels) + + # Factor Selection Module + self.fsm = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, fsm_channels[0], 1), + nn.BatchNorm2d(fsm_channels[0]), + nn.ReLU(inplace=True), + nn.Conv2d(fsm_channels[0], fsm_channels[1], 1), + 
nn.BatchNorm2d(fsm_channels[1]), + nn.ReLU(inplace=True), + nn.Conv2d(fsm_channels[1], self.groups, 1), + nn.BatchNorm2d(self.groups), + nn.Sigmoid(), + ) + + self.downsample = None + if in_channels != out_channels or stride > 1: + self.downsample = nn.Sequential( + nn.Conv2d( + in_channels, out_channels, 1, stride=stride, bias=False + ), + nn.BatchNorm2d(out_channels), + ) + + def forward(self, x): + residual = x + s = self.fsm(x) + + # reduce dimension + x = self.fm_conv1(x) + x = self.fm_bn1(x) + x = F.relu(x, inplace=True) + + # group convolution + x = self.fm_conv2(x) + x = self.fm_bn2(x) + x = F.relu(x, inplace=True) + + # factor selection + b, c = x.size(0), x.size(1) + n = c // self.groups + ss = s.repeat(1, n, 1, 1) # from (b, g, 1, 1) to (b, g*n=c, 1, 1) + ss = ss.view(b, n, self.groups, 1, 1) + ss = ss.permute(0, 2, 1, 3, 4).contiguous() + ss = ss.view(b, c, 1, 1) + x = ss * x + + # recover dimension + x = self.fm_conv3(x) + x = self.fm_bn3(x) + x = F.relu(x, inplace=True) + + if self.downsample is not None: + residual = self.downsample(residual) + + return F.relu(residual + x, inplace=True), s + + +class MLFN(nn.Module): + """Multi-Level Factorisation Net. + + Reference: + Chang et al. Multi-Level Factorisation Net for + Person Re-Identification. CVPR 2018. + + Public keys: + - ``mlfn``: MLFN (Multi-Level Factorisation Net). + """ + + def __init__( + self, + num_classes, + loss='softmax', + groups=32, + channels=[64, 256, 512, 1024, 2048], + embed_dim=1024, + **kwargs + ): + super(MLFN, self).__init__() + self.loss = loss + self.groups = groups + + # first convolutional layer + self.conv1 = nn.Conv2d(3, channels[0], 7, stride=2, padding=3) + self.bn1 = nn.BatchNorm2d(channels[0]) + self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) + + # main body + self.feature = nn.ModuleList( + [ + # layer 1-3 + MLFNBlock(channels[0], channels[1], 1, [128, 64], self.groups), + MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups), + MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups), + # layer 4-7 + MLFNBlock( + channels[1], channels[2], 2, [256, 128], self.groups + ), + MLFNBlock( + channels[2], channels[2], 1, [256, 128], self.groups + ), + MLFNBlock( + channels[2], channels[2], 1, [256, 128], self.groups + ), + MLFNBlock( + channels[2], channels[2], 1, [256, 128], self.groups + ), + # layer 8-13 + MLFNBlock( + channels[2], channels[3], 2, [512, 128], self.groups + ), + MLFNBlock( + channels[3], channels[3], 1, [512, 128], self.groups + ), + MLFNBlock( + channels[3], channels[3], 1, [512, 128], self.groups + ), + MLFNBlock( + channels[3], channels[3], 1, [512, 128], self.groups + ), + MLFNBlock( + channels[3], channels[3], 1, [512, 128], self.groups + ), + MLFNBlock( + channels[3], channels[3], 1, [512, 128], self.groups + ), + # layer 14-16 + MLFNBlock( + channels[3], channels[4], 2, [512, 128], self.groups + ), + MLFNBlock( + channels[4], channels[4], 1, [512, 128], self.groups + ), + MLFNBlock( + channels[4], channels[4], 1, [512, 128], self.groups + ), + ] + ) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + + # projection functions + self.fc_x = nn.Sequential( + nn.Conv2d(channels[4], embed_dim, 1, bias=False), + nn.BatchNorm2d(embed_dim), + nn.ReLU(inplace=True), + ) + self.fc_s = nn.Sequential( + nn.Conv2d(self.groups * 16, embed_dim, 1, bias=False), + nn.BatchNorm2d(embed_dim), + nn.ReLU(inplace=True), + ) + + self.classifier = nn.Linear(embed_dim, num_classes) + + self.init_params() + + def init_params(self): + for m in self.modules(): + if isinstance(m, 
nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = F.relu(x, inplace=True) + x = self.maxpool(x) + + s_hat = [] + for block in self.feature: + x, s = block(x) + s_hat.append(s) + s_hat = torch.cat(s_hat, 1) + + x = self.global_avgpool(x) + x = self.fc_x(x) + s_hat = self.fc_s(s_hat) + + v = (x+s_hat) * 0.5 + v = v.view(v.size(0), -1) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def mlfn(num_classes, loss='softmax', pretrained=True, **kwargs): + model = MLFN(num_classes, loss, **kwargs) + if pretrained: + # init_pretrained_weights(model, model_urls['imagenet']) + import warnings + warnings.warn( + 'The imagenet pretrained weights need to be manually downloaded from {}' + .format(model_urls['imagenet']) + ) + return model diff --git a/feeder/trackers/strongsort/deep/models/mobilenetv2.py b/feeder/trackers/strongsort/deep/models/mobilenetv2.py new file mode 100644 index 0000000..c451ef8 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/mobilenetv2.py @@ -0,0 +1,274 @@ +from __future__ import division, absolute_import +import torch.utils.model_zoo as model_zoo +from torch import nn +from torch.nn import functional as F + +__all__ = ['mobilenetv2_x1_0', 'mobilenetv2_x1_4'] + +model_urls = { + # 1.0: top-1 71.3 + 'mobilenetv2_x1_0': + 'https://mega.nz/#!NKp2wAIA!1NH1pbNzY_M2hVk_hdsxNM1NUOWvvGPHhaNr-fASF6c', + # 1.4: top-1 73.9 + 'mobilenetv2_x1_4': + 'https://mega.nz/#!RGhgEIwS!xN2s2ZdyqI6vQ3EwgmRXLEW3khr9tpXg96G9SUJugGk', +} + + +class ConvBlock(nn.Module): + """Basic convolutional block. + + convolution (bias discarded) + batch normalization + relu6. + + Args: + in_c (int): number of input channels. + out_c (int): number of output channels. + k (int or tuple): kernel size. + s (int or tuple): stride. + p (int or tuple): padding. + g (int): number of blocked connections from input channels + to output channels (default: 1). 
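+
+    Editor's note (not in the original source): passing ``g`` equal to the
+    number of channels makes this a depthwise convolution, which is how the
+    inverted-residual ``Bottleneck`` below uses it for its 3x3 stage.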
+ """ + + def __init__(self, in_c, out_c, k, s=1, p=0, g=1): + super(ConvBlock, self).__init__() + self.conv = nn.Conv2d( + in_c, out_c, k, stride=s, padding=p, bias=False, groups=g + ) + self.bn = nn.BatchNorm2d(out_c) + + def forward(self, x): + return F.relu6(self.bn(self.conv(x))) + + +class Bottleneck(nn.Module): + + def __init__(self, in_channels, out_channels, expansion_factor, stride=1): + super(Bottleneck, self).__init__() + mid_channels = in_channels * expansion_factor + self.use_residual = stride == 1 and in_channels == out_channels + self.conv1 = ConvBlock(in_channels, mid_channels, 1) + self.dwconv2 = ConvBlock( + mid_channels, mid_channels, 3, stride, 1, g=mid_channels + ) + self.conv3 = nn.Sequential( + nn.Conv2d(mid_channels, out_channels, 1, bias=False), + nn.BatchNorm2d(out_channels), + ) + + def forward(self, x): + m = self.conv1(x) + m = self.dwconv2(m) + m = self.conv3(m) + if self.use_residual: + return x + m + else: + return m + + +class MobileNetV2(nn.Module): + """MobileNetV2. + + Reference: + Sandler et al. MobileNetV2: Inverted Residuals and + Linear Bottlenecks. CVPR 2018. + + Public keys: + - ``mobilenetv2_x1_0``: MobileNetV2 x1.0. + - ``mobilenetv2_x1_4``: MobileNetV2 x1.4. + """ + + def __init__( + self, + num_classes, + width_mult=1, + loss='softmax', + fc_dims=None, + dropout_p=None, + **kwargs + ): + super(MobileNetV2, self).__init__() + self.loss = loss + self.in_channels = int(32 * width_mult) + self.feature_dim = int(1280 * width_mult) if width_mult > 1 else 1280 + + # construct layers + self.conv1 = ConvBlock(3, self.in_channels, 3, s=2, p=1) + self.conv2 = self._make_layer( + Bottleneck, 1, int(16 * width_mult), 1, 1 + ) + self.conv3 = self._make_layer( + Bottleneck, 6, int(24 * width_mult), 2, 2 + ) + self.conv4 = self._make_layer( + Bottleneck, 6, int(32 * width_mult), 3, 2 + ) + self.conv5 = self._make_layer( + Bottleneck, 6, int(64 * width_mult), 4, 2 + ) + self.conv6 = self._make_layer( + Bottleneck, 6, int(96 * width_mult), 3, 1 + ) + self.conv7 = self._make_layer( + Bottleneck, 6, int(160 * width_mult), 3, 2 + ) + self.conv8 = self._make_layer( + Bottleneck, 6, int(320 * width_mult), 1, 1 + ) + self.conv9 = ConvBlock(self.in_channels, self.feature_dim, 1) + + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = self._construct_fc_layer( + fc_dims, self.feature_dim, dropout_p + ) + self.classifier = nn.Linear(self.feature_dim, num_classes) + + self._init_params() + + def _make_layer(self, block, t, c, n, s): + # t: expansion factor + # c: output channels + # n: number of blocks + # s: stride for first layer + layers = [] + layers.append(block(self.in_channels, c, t, s)) + self.in_channels = c + for i in range(1, n): + layers.append(block(self.in_channels, c, t)) + return nn.Sequential(*layers) + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """Constructs fully connected layer. 
+ + Args: + fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed + input_dim (int): input dimension + dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + x = self.conv4(x) + x = self.conv5(x) + x = self.conv6(x) + x = self.conv7(x) + x = self.conv8(x) + x = self.conv9(x) + return x + + def forward(self, x): + f = self.featuremaps(x) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if self.fc is not None: + v = self.fc(v) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError("Unsupported loss: {}".format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. 
+ """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def mobilenetv2_x1_0(num_classes, loss, pretrained=True, **kwargs): + model = MobileNetV2( + num_classes, + loss=loss, + width_mult=1, + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + # init_pretrained_weights(model, model_urls['mobilenetv2_x1_0']) + import warnings + warnings.warn( + 'The imagenet pretrained weights need to be manually downloaded from {}' + .format(model_urls['mobilenetv2_x1_0']) + ) + return model + + +def mobilenetv2_x1_4(num_classes, loss, pretrained=True, **kwargs): + model = MobileNetV2( + num_classes, + loss=loss, + width_mult=1.4, + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + # init_pretrained_weights(model, model_urls['mobilenetv2_x1_4']) + import warnings + warnings.warn( + 'The imagenet pretrained weights need to be manually downloaded from {}' + .format(model_urls['mobilenetv2_x1_4']) + ) + return model diff --git a/feeder/trackers/strongsort/deep/models/mudeep.py b/feeder/trackers/strongsort/deep/models/mudeep.py new file mode 100644 index 0000000..ddbca67 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/mudeep.py @@ -0,0 +1,206 @@ +from __future__ import division, absolute_import +import torch +from torch import nn +from torch.nn import functional as F + +__all__ = ['MuDeep'] + + +class ConvBlock(nn.Module): + """Basic convolutional block. + + convolution + batch normalization + relu. + + Args: + in_c (int): number of input channels. + out_c (int): number of output channels. + k (int or tuple): kernel size. + s (int or tuple): stride. + p (int or tuple): padding. 
+ """ + + def __init__(self, in_c, out_c, k, s, p): + super(ConvBlock, self).__init__() + self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p) + self.bn = nn.BatchNorm2d(out_c) + + def forward(self, x): + return F.relu(self.bn(self.conv(x))) + + +class ConvLayers(nn.Module): + """Preprocessing layers.""" + + def __init__(self): + super(ConvLayers, self).__init__() + self.conv1 = ConvBlock(3, 48, k=3, s=1, p=1) + self.conv2 = ConvBlock(48, 96, k=3, s=1, p=1) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.maxpool(x) + return x + + +class MultiScaleA(nn.Module): + """Multi-scale stream layer A (Sec.3.1)""" + + def __init__(self): + super(MultiScaleA, self).__init__() + self.stream1 = nn.Sequential( + ConvBlock(96, 96, k=1, s=1, p=0), + ConvBlock(96, 24, k=3, s=1, p=1), + ) + self.stream2 = nn.Sequential( + nn.AvgPool2d(kernel_size=3, stride=1, padding=1), + ConvBlock(96, 24, k=1, s=1, p=0), + ) + self.stream3 = ConvBlock(96, 24, k=1, s=1, p=0) + self.stream4 = nn.Sequential( + ConvBlock(96, 16, k=1, s=1, p=0), + ConvBlock(16, 24, k=3, s=1, p=1), + ConvBlock(24, 24, k=3, s=1, p=1), + ) + + def forward(self, x): + s1 = self.stream1(x) + s2 = self.stream2(x) + s3 = self.stream3(x) + s4 = self.stream4(x) + y = torch.cat([s1, s2, s3, s4], dim=1) + return y + + +class Reduction(nn.Module): + """Reduction layer (Sec.3.1)""" + + def __init__(self): + super(Reduction, self).__init__() + self.stream1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.stream2 = ConvBlock(96, 96, k=3, s=2, p=1) + self.stream3 = nn.Sequential( + ConvBlock(96, 48, k=1, s=1, p=0), + ConvBlock(48, 56, k=3, s=1, p=1), + ConvBlock(56, 64, k=3, s=2, p=1), + ) + + def forward(self, x): + s1 = self.stream1(x) + s2 = self.stream2(x) + s3 = self.stream3(x) + y = torch.cat([s1, s2, s3], dim=1) + return y + + +class MultiScaleB(nn.Module): + """Multi-scale stream layer B (Sec.3.1)""" + + def __init__(self): + super(MultiScaleB, self).__init__() + self.stream1 = nn.Sequential( + nn.AvgPool2d(kernel_size=3, stride=1, padding=1), + ConvBlock(256, 256, k=1, s=1, p=0), + ) + self.stream2 = nn.Sequential( + ConvBlock(256, 64, k=1, s=1, p=0), + ConvBlock(64, 128, k=(1, 3), s=1, p=(0, 1)), + ConvBlock(128, 256, k=(3, 1), s=1, p=(1, 0)), + ) + self.stream3 = ConvBlock(256, 256, k=1, s=1, p=0) + self.stream4 = nn.Sequential( + ConvBlock(256, 64, k=1, s=1, p=0), + ConvBlock(64, 64, k=(1, 3), s=1, p=(0, 1)), + ConvBlock(64, 128, k=(3, 1), s=1, p=(1, 0)), + ConvBlock(128, 128, k=(1, 3), s=1, p=(0, 1)), + ConvBlock(128, 256, k=(3, 1), s=1, p=(1, 0)), + ) + + def forward(self, x): + s1 = self.stream1(x) + s2 = self.stream2(x) + s3 = self.stream3(x) + s4 = self.stream4(x) + return s1, s2, s3, s4 + + +class Fusion(nn.Module): + """Saliency-based learning fusion layer (Sec.3.2)""" + + def __init__(self): + super(Fusion, self).__init__() + self.a1 = nn.Parameter(torch.rand(1, 256, 1, 1)) + self.a2 = nn.Parameter(torch.rand(1, 256, 1, 1)) + self.a3 = nn.Parameter(torch.rand(1, 256, 1, 1)) + self.a4 = nn.Parameter(torch.rand(1, 256, 1, 1)) + + # We add an average pooling layer to reduce the spatial dimension + # of feature maps, which differs from the original paper. 
+ self.avgpool = nn.AvgPool2d(kernel_size=4, stride=4, padding=0) + + def forward(self, x1, x2, x3, x4): + s1 = self.a1.expand_as(x1) * x1 + s2 = self.a2.expand_as(x2) * x2 + s3 = self.a3.expand_as(x3) * x3 + s4 = self.a4.expand_as(x4) * x4 + y = self.avgpool(s1 + s2 + s3 + s4) + return y + + +class MuDeep(nn.Module): + """Multiscale deep neural network. + + Reference: + Qian et al. Multi-scale Deep Learning Architectures + for Person Re-identification. ICCV 2017. + + Public keys: + - ``mudeep``: Multiscale deep neural network. + """ + + def __init__(self, num_classes, loss='softmax', **kwargs): + super(MuDeep, self).__init__() + self.loss = loss + + self.block1 = ConvLayers() + self.block2 = MultiScaleA() + self.block3 = Reduction() + self.block4 = MultiScaleB() + self.block5 = Fusion() + + # Due to this fully connected layer, input image has to be fixed + # in shape, i.e. (3, 256, 128), such that the last convolutional feature + # maps are of shape (256, 16, 8). If input shape is changed, + # the input dimension of this layer has to be changed accordingly. + self.fc = nn.Sequential( + nn.Linear(256 * 16 * 8, 4096), + nn.BatchNorm1d(4096), + nn.ReLU(), + ) + self.classifier = nn.Linear(4096, num_classes) + self.feat_dim = 4096 + + def featuremaps(self, x): + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + x = self.block4(x) + x = self.block5(*x) + return x + + def forward(self, x): + x = self.featuremaps(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + y = self.classifier(x) + + if not self.training: + return x + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, x + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) diff --git a/feeder/trackers/strongsort/deep/models/nasnet.py b/feeder/trackers/strongsort/deep/models/nasnet.py new file mode 100644 index 0000000..b1f31de --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/nasnet.py @@ -0,0 +1,1131 @@ +from __future__ import division, absolute_import +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo + +__all__ = ['nasnetamobile'] +""" +NASNet Mobile +Thanks to Anastasiia (https://github.com/DagnyT) for the great help, support and motivation! 
+ + +------------------------------------------------------------------------------------ + Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M) +------------------------------------------------------------------------------------ +| NASNet-A (4 @ 1056) | 74.08% | 91.74% | 564 M | 5.3 | +------------------------------------------------------------------------------------ +# References: + - [Learning Transferable Architectures for Scalable Image Recognition] + (https://arxiv.org/abs/1707.07012) +""" +""" +Code imported from https://github.com/Cadene/pretrained-models.pytorch +""" + +pretrained_settings = { + 'nasnetamobile': { + 'imagenet': { + # 'url': 'https://github.com/veronikayurchuk/pretrained-models.pytorch/releases/download/v1.0/nasnetmobile-7e03cead.pth.tar', + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetamobile-7e03cead.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], # resize 256 + 'input_range': [0, 1], + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'num_classes': 1000 + }, + # 'imagenet+background': { + # # 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth', + # 'input_space': 'RGB', + # 'input_size': [3, 224, 224], # resize 256 + # 'input_range': [0, 1], + # 'mean': [0.5, 0.5, 0.5], + # 'std': [0.5, 0.5, 0.5], + # 'num_classes': 1001 + # } + } +} + + +class MaxPoolPad(nn.Module): + + def __init__(self): + super(MaxPoolPad, self).__init__() + self.pad = nn.ZeroPad2d((1, 0, 1, 0)) + self.pool = nn.MaxPool2d(3, stride=2, padding=1) + + def forward(self, x): + x = self.pad(x) + x = self.pool(x) + x = x[:, :, 1:, 1:].contiguous() + return x + + +class AvgPoolPad(nn.Module): + + def __init__(self, stride=2, padding=1): + super(AvgPoolPad, self).__init__() + self.pad = nn.ZeroPad2d((1, 0, 1, 0)) + self.pool = nn.AvgPool2d( + 3, stride=stride, padding=padding, count_include_pad=False + ) + + def forward(self, x): + x = self.pad(x) + x = self.pool(x) + x = x[:, :, 1:, 1:].contiguous() + return x + + +class SeparableConv2d(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + dw_kernel, + dw_stride, + dw_padding, + bias=False + ): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = nn.Conv2d( + in_channels, + in_channels, + dw_kernel, + stride=dw_stride, + padding=dw_padding, + bias=bias, + groups=in_channels + ) + self.pointwise_conv2d = nn.Conv2d( + in_channels, out_channels, 1, stride=1, bias=bias + ) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + name=None, + bias=False + ): + super(BranchSeparables, self).__init__() + self.relu = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, in_channels, kernel_size, stride, padding, bias=bias + ) + self.bn_sep_1 = nn.BatchNorm2d( + in_channels, eps=0.001, momentum=0.1, affine=True + ) + self.relu1 = nn.ReLU() + self.separable_2 = SeparableConv2d( + in_channels, out_channels, kernel_size, 1, padding, bias=bias + ) + self.bn_sep_2 = nn.BatchNorm2d( + out_channels, eps=0.001, momentum=0.1, affine=True + ) + self.name = name + + def forward(self, x): + x = self.relu(x) + if self.name == 'specific': + x = nn.ZeroPad2d((1, 0, 1, 0))(x) + x = self.separable_1(x) + if self.name == 'specific': + x = x[:, :, 1:, 1:].contiguous() + + x = self.bn_sep_1(x) + x = self.relu1(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class 
BranchSeparablesStem(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + bias=False + ): + super(BranchSeparablesStem, self).__init__() + self.relu = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, out_channels, kernel_size, stride, padding, bias=bias + ) + self.bn_sep_1 = nn.BatchNorm2d( + out_channels, eps=0.001, momentum=0.1, affine=True + ) + self.relu1 = nn.ReLU() + self.separable_2 = SeparableConv2d( + out_channels, out_channels, kernel_size, 1, padding, bias=bias + ) + self.bn_sep_2 = nn.BatchNorm2d( + out_channels, eps=0.001, momentum=0.1, affine=True + ) + + def forward(self, x): + x = self.relu(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.relu1(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class BranchSeparablesReduction(BranchSeparables): + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + z_padding=1, + bias=False + ): + BranchSeparables.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, bias + ) + self.padding = nn.ZeroPad2d((z_padding, 0, z_padding, 0)) + + def forward(self, x): + x = self.relu(x) + x = self.padding(x) + x = self.separable_1(x) + x = x[:, :, 1:, 1:].contiguous() + x = self.bn_sep_1(x) + x = self.relu1(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class CellStem0(nn.Module): + + def __init__(self, stem_filters, num_filters=42): + super(CellStem0, self).__init__() + self.num_filters = num_filters + self.stem_filters = stem_filters + self.conv_1x1 = nn.Sequential() + self.conv_1x1.add_module('relu', nn.ReLU()) + self.conv_1x1.add_module( + 'conv', + nn.Conv2d( + self.stem_filters, self.num_filters, 1, stride=1, bias=False + ) + ) + self.conv_1x1.add_module( + 'bn', + nn.BatchNorm2d( + self.num_filters, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.comb_iter_0_left = BranchSeparables( + self.num_filters, self.num_filters, 5, 2, 2 + ) + self.comb_iter_0_right = BranchSeparablesStem( + self.stem_filters, self.num_filters, 7, 2, 3, bias=False + ) + + self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) + self.comb_iter_1_right = BranchSeparablesStem( + self.stem_filters, self.num_filters, 7, 2, 3, bias=False + ) + + self.comb_iter_2_left = nn.AvgPool2d( + 3, stride=2, padding=1, count_include_pad=False + ) + self.comb_iter_2_right = BranchSeparablesStem( + self.stem_filters, self.num_filters, 5, 2, 2, bias=False + ) + + self.comb_iter_3_right = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + + self.comb_iter_4_left = BranchSeparables( + self.num_filters, self.num_filters, 3, 1, 1, bias=False + ) + self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) + + def forward(self, x): + x1 = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x1) + x_comb_iter_0_right = self.comb_iter_0_right(x) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x1) + x_comb_iter_1_right = self.comb_iter_1_right(x) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x1) + x_comb_iter_2_right = self.comb_iter_2_right(x) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x1) + x_comb_iter_4 = 
x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat( + [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 + ) + return x_out + + +class CellStem1(nn.Module): + + def __init__(self, stem_filters, num_filters): + super(CellStem1, self).__init__() + self.num_filters = num_filters + self.stem_filters = stem_filters + self.conv_1x1 = nn.Sequential() + self.conv_1x1.add_module('relu', nn.ReLU()) + self.conv_1x1.add_module( + 'conv', + nn.Conv2d( + 2 * self.num_filters, + self.num_filters, + 1, + stride=1, + bias=False + ) + ) + self.conv_1x1.add_module( + 'bn', + nn.BatchNorm2d( + self.num_filters, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.relu = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module( + 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) + ) + self.path_1.add_module( + 'conv', + nn.Conv2d( + self.stem_filters, + self.num_filters // 2, + 1, + stride=1, + bias=False + ) + ) + self.path_2 = nn.ModuleList() + self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1))) + self.path_2.add_module( + 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) + ) + self.path_2.add_module( + 'conv', + nn.Conv2d( + self.stem_filters, + self.num_filters // 2, + 1, + stride=1, + bias=False + ) + ) + + self.final_path_bn = nn.BatchNorm2d( + self.num_filters, eps=0.001, momentum=0.1, affine=True + ) + + self.comb_iter_0_left = BranchSeparables( + self.num_filters, + self.num_filters, + 5, + 2, + 2, + name='specific', + bias=False + ) + self.comb_iter_0_right = BranchSeparables( + self.num_filters, + self.num_filters, + 7, + 2, + 3, + name='specific', + bias=False + ) + + # self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) + self.comb_iter_1_left = MaxPoolPad() + self.comb_iter_1_right = BranchSeparables( + self.num_filters, + self.num_filters, + 7, + 2, + 3, + name='specific', + bias=False + ) + + # self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False) + self.comb_iter_2_left = AvgPoolPad() + self.comb_iter_2_right = BranchSeparables( + self.num_filters, + self.num_filters, + 5, + 2, + 2, + name='specific', + bias=False + ) + + self.comb_iter_3_right = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + + self.comb_iter_4_left = BranchSeparables( + self.num_filters, + self.num_filters, + 3, + 1, + 1, + name='specific', + bias=False + ) + # self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) + self.comb_iter_4_right = MaxPoolPad() + + def forward(self, x_conv0, x_stem_0): + x_left = self.conv_1x1(x_stem_0) + + x_relu = self.relu(x_conv0) + # path 1 + x_path1 = self.path_1(x_relu) + # path 2 + x_path2 = self.path_2.pad(x_relu) + x_path2 = x_path2[:, :, 1:, 1:] + x_path2 = self.path_2.avgpool(x_path2) + x_path2 = self.path_2.conv(x_path2) + # final path + x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_right) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_left) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = 
self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_left) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat( + [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 + ) + return x_out + + +class FirstCell(nn.Module): + + def __init__( + self, in_channels_left, out_channels_left, in_channels_right, + out_channels_right + ): + super(FirstCell, self).__init__() + self.conv_1x1 = nn.Sequential() + self.conv_1x1.add_module('relu', nn.ReLU()) + self.conv_1x1.add_module( + 'conv', + nn.Conv2d( + in_channels_right, out_channels_right, 1, stride=1, bias=False + ) + ) + self.conv_1x1.add_module( + 'bn', + nn.BatchNorm2d( + out_channels_right, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.relu = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module( + 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) + ) + self.path_1.add_module( + 'conv', + nn.Conv2d( + in_channels_left, out_channels_left, 1, stride=1, bias=False + ) + ) + self.path_2 = nn.ModuleList() + self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1))) + self.path_2.add_module( + 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) + ) + self.path_2.add_module( + 'conv', + nn.Conv2d( + in_channels_left, out_channels_left, 1, stride=1, bias=False + ) + ) + + self.final_path_bn = nn.BatchNorm2d( + out_channels_left * 2, eps=0.001, momentum=0.1, affine=True + ) + + self.comb_iter_0_left = BranchSeparables( + out_channels_right, out_channels_right, 5, 1, 2, bias=False + ) + self.comb_iter_0_right = BranchSeparables( + out_channels_right, out_channels_right, 3, 1, 1, bias=False + ) + + self.comb_iter_1_left = BranchSeparables( + out_channels_right, out_channels_right, 5, 1, 2, bias=False + ) + self.comb_iter_1_right = BranchSeparables( + out_channels_right, out_channels_right, 3, 1, 1, bias=False + ) + + self.comb_iter_2_left = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + + self.comb_iter_3_left = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + self.comb_iter_3_right = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + + self.comb_iter_4_left = BranchSeparables( + out_channels_right, out_channels_right, 3, 1, 1, bias=False + ) + + def forward(self, x, x_prev): + x_relu = self.relu(x_prev) + # path 1 + x_path1 = self.path_1(x_relu) + # path 2 + x_path2 = self.path_2.pad(x_relu) + x_path2 = x_path2[:, :, 1:, 1:] + x_path2 = self.path_2.avgpool(x_path2) + x_path2 = self.path_2.conv(x_path2) + # final path + x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat( + [ + x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, + x_comb_iter_3, x_comb_iter_4 + ], 1 + ) + return x_out + + +class NormalCell(nn.Module): + 
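+    # Normal (stride-1) NASNet cell: the previous cell's output and the current
+    # input are each passed through a ReLU -> 1x1 conv -> BN stem, then recombined
+    # through five branches of separable convolutions and 3x3 average pooling;
+    # the cell returns the channel-wise concatenation of the left stem output
+    # and the five branch sums.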
+ def __init__( + self, in_channels_left, out_channels_left, in_channels_right, + out_channels_right + ): + super(NormalCell, self).__init__() + self.conv_prev_1x1 = nn.Sequential() + self.conv_prev_1x1.add_module('relu', nn.ReLU()) + self.conv_prev_1x1.add_module( + 'conv', + nn.Conv2d( + in_channels_left, out_channels_left, 1, stride=1, bias=False + ) + ) + self.conv_prev_1x1.add_module( + 'bn', + nn.BatchNorm2d( + out_channels_left, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.conv_1x1 = nn.Sequential() + self.conv_1x1.add_module('relu', nn.ReLU()) + self.conv_1x1.add_module( + 'conv', + nn.Conv2d( + in_channels_right, out_channels_right, 1, stride=1, bias=False + ) + ) + self.conv_1x1.add_module( + 'bn', + nn.BatchNorm2d( + out_channels_right, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.comb_iter_0_left = BranchSeparables( + out_channels_right, out_channels_right, 5, 1, 2, bias=False + ) + self.comb_iter_0_right = BranchSeparables( + out_channels_left, out_channels_left, 3, 1, 1, bias=False + ) + + self.comb_iter_1_left = BranchSeparables( + out_channels_left, out_channels_left, 5, 1, 2, bias=False + ) + self.comb_iter_1_right = BranchSeparables( + out_channels_left, out_channels_left, 3, 1, 1, bias=False + ) + + self.comb_iter_2_left = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + + self.comb_iter_3_left = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + self.comb_iter_3_right = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + + self.comb_iter_4_left = BranchSeparables( + out_channels_right, out_channels_right, 3, 1, 1, bias=False + ) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat( + [ + x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, + x_comb_iter_3, x_comb_iter_4 + ], 1 + ) + return x_out + + +class ReductionCell0(nn.Module): + + def __init__( + self, in_channels_left, out_channels_left, in_channels_right, + out_channels_right + ): + super(ReductionCell0, self).__init__() + self.conv_prev_1x1 = nn.Sequential() + self.conv_prev_1x1.add_module('relu', nn.ReLU()) + self.conv_prev_1x1.add_module( + 'conv', + nn.Conv2d( + in_channels_left, out_channels_left, 1, stride=1, bias=False + ) + ) + self.conv_prev_1x1.add_module( + 'bn', + nn.BatchNorm2d( + out_channels_left, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.conv_1x1 = nn.Sequential() + self.conv_1x1.add_module('relu', nn.ReLU()) + self.conv_1x1.add_module( + 'conv', + nn.Conv2d( + in_channels_right, out_channels_right, 1, stride=1, bias=False + ) + ) + self.conv_1x1.add_module( + 'bn', + nn.BatchNorm2d( + out_channels_right, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.comb_iter_0_left = BranchSeparablesReduction( + out_channels_right, 
out_channels_right, 5, 2, 2, bias=False + ) + self.comb_iter_0_right = BranchSeparablesReduction( + out_channels_right, out_channels_right, 7, 2, 3, bias=False + ) + + self.comb_iter_1_left = MaxPoolPad() + self.comb_iter_1_right = BranchSeparablesReduction( + out_channels_right, out_channels_right, 7, 2, 3, bias=False + ) + + self.comb_iter_2_left = AvgPoolPad() + self.comb_iter_2_right = BranchSeparablesReduction( + out_channels_right, out_channels_right, 5, 2, 2, bias=False + ) + + self.comb_iter_3_right = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + + self.comb_iter_4_left = BranchSeparablesReduction( + out_channels_right, out_channels_right, 3, 1, 1, bias=False + ) + self.comb_iter_4_right = MaxPoolPad() + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat( + [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 + ) + return x_out + + +class ReductionCell1(nn.Module): + + def __init__( + self, in_channels_left, out_channels_left, in_channels_right, + out_channels_right + ): + super(ReductionCell1, self).__init__() + self.conv_prev_1x1 = nn.Sequential() + self.conv_prev_1x1.add_module('relu', nn.ReLU()) + self.conv_prev_1x1.add_module( + 'conv', + nn.Conv2d( + in_channels_left, out_channels_left, 1, stride=1, bias=False + ) + ) + self.conv_prev_1x1.add_module( + 'bn', + nn.BatchNorm2d( + out_channels_left, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.conv_1x1 = nn.Sequential() + self.conv_1x1.add_module('relu', nn.ReLU()) + self.conv_1x1.add_module( + 'conv', + nn.Conv2d( + in_channels_right, out_channels_right, 1, stride=1, bias=False + ) + ) + self.conv_1x1.add_module( + 'bn', + nn.BatchNorm2d( + out_channels_right, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.comb_iter_0_left = BranchSeparables( + out_channels_right, + out_channels_right, + 5, + 2, + 2, + name='specific', + bias=False + ) + self.comb_iter_0_right = BranchSeparables( + out_channels_right, + out_channels_right, + 7, + 2, + 3, + name='specific', + bias=False + ) + + # self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) + self.comb_iter_1_left = MaxPoolPad() + self.comb_iter_1_right = BranchSeparables( + out_channels_right, + out_channels_right, + 7, + 2, + 3, + name='specific', + bias=False + ) + + # self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False) + self.comb_iter_2_left = AvgPoolPad() + self.comb_iter_2_right = BranchSeparables( + out_channels_right, + out_channels_right, + 5, + 2, + 2, + name='specific', + bias=False + ) + + self.comb_iter_3_right = nn.AvgPool2d( + 3, stride=1, padding=1, count_include_pad=False + ) + + self.comb_iter_4_left = BranchSeparables( 
+ out_channels_right, + out_channels_right, + 3, + 1, + 1, + name='specific', + bias=False + ) + # self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) + self.comb_iter_4_right = MaxPoolPad() + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat( + [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 + ) + return x_out + + +class NASNetAMobile(nn.Module): + """Neural Architecture Search (NAS). + + Reference: + Zoph et al. Learning Transferable Architectures + for Scalable Image Recognition. CVPR 2018. + + Public keys: + - ``nasnetamobile``: NASNet-A Mobile. + """ + + def __init__( + self, + num_classes, + loss, + stem_filters=32, + penultimate_filters=1056, + filters_multiplier=2, + **kwargs + ): + super(NASNetAMobile, self).__init__() + self.stem_filters = stem_filters + self.penultimate_filters = penultimate_filters + self.filters_multiplier = filters_multiplier + self.loss = loss + + filters = self.penultimate_filters // 24 + # 24 is default value for the architecture + + self.conv0 = nn.Sequential() + self.conv0.add_module( + 'conv', + nn.Conv2d( + in_channels=3, + out_channels=self.stem_filters, + kernel_size=3, + padding=0, + stride=2, + bias=False + ) + ) + self.conv0.add_module( + 'bn', + nn.BatchNorm2d( + self.stem_filters, eps=0.001, momentum=0.1, affine=True + ) + ) + + self.cell_stem_0 = CellStem0( + self.stem_filters, num_filters=filters // (filters_multiplier**2) + ) + self.cell_stem_1 = CellStem1( + self.stem_filters, num_filters=filters // filters_multiplier + ) + + self.cell_0 = FirstCell( + in_channels_left=filters, + out_channels_left=filters // 2, # 1, 0.5 + in_channels_right=2 * filters, + out_channels_right=filters + ) # 2, 1 + self.cell_1 = NormalCell( + in_channels_left=2 * filters, + out_channels_left=filters, # 2, 1 + in_channels_right=6 * filters, + out_channels_right=filters + ) # 6, 1 + self.cell_2 = NormalCell( + in_channels_left=6 * filters, + out_channels_left=filters, # 6, 1 + in_channels_right=6 * filters, + out_channels_right=filters + ) # 6, 1 + self.cell_3 = NormalCell( + in_channels_left=6 * filters, + out_channels_left=filters, # 6, 1 + in_channels_right=6 * filters, + out_channels_right=filters + ) # 6, 1 + + self.reduction_cell_0 = ReductionCell0( + in_channels_left=6 * filters, + out_channels_left=2 * filters, # 6, 2 + in_channels_right=6 * filters, + out_channels_right=2 * filters + ) # 6, 2 + + self.cell_6 = FirstCell( + in_channels_left=6 * filters, + out_channels_left=filters, # 6, 1 + in_channels_right=8 * filters, + out_channels_right=2 * filters + ) # 8, 2 + self.cell_7 = NormalCell( + in_channels_left=8 * filters, + out_channels_left=2 * filters, # 8, 2 + 
in_channels_right=12 * filters, + out_channels_right=2 * filters + ) # 12, 2 + self.cell_8 = NormalCell( + in_channels_left=12 * filters, + out_channels_left=2 * filters, # 12, 2 + in_channels_right=12 * filters, + out_channels_right=2 * filters + ) # 12, 2 + self.cell_9 = NormalCell( + in_channels_left=12 * filters, + out_channels_left=2 * filters, # 12, 2 + in_channels_right=12 * filters, + out_channels_right=2 * filters + ) # 12, 2 + + self.reduction_cell_1 = ReductionCell1( + in_channels_left=12 * filters, + out_channels_left=4 * filters, # 12, 4 + in_channels_right=12 * filters, + out_channels_right=4 * filters + ) # 12, 4 + + self.cell_12 = FirstCell( + in_channels_left=12 * filters, + out_channels_left=2 * filters, # 12, 2 + in_channels_right=16 * filters, + out_channels_right=4 * filters + ) # 16, 4 + self.cell_13 = NormalCell( + in_channels_left=16 * filters, + out_channels_left=4 * filters, # 16, 4 + in_channels_right=24 * filters, + out_channels_right=4 * filters + ) # 24, 4 + self.cell_14 = NormalCell( + in_channels_left=24 * filters, + out_channels_left=4 * filters, # 24, 4 + in_channels_right=24 * filters, + out_channels_right=4 * filters + ) # 24, 4 + self.cell_15 = NormalCell( + in_channels_left=24 * filters, + out_channels_left=4 * filters, # 24, 4 + in_channels_right=24 * filters, + out_channels_right=4 * filters + ) # 24, 4 + + self.relu = nn.ReLU() + self.dropout = nn.Dropout() + self.classifier = nn.Linear(24 * filters, num_classes) + + self._init_params() + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def features(self, input): + x_conv0 = self.conv0(input) + x_stem_0 = self.cell_stem_0(x_conv0) + x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) + + x_cell_0 = self.cell_0(x_stem_1, x_stem_0) + x_cell_1 = self.cell_1(x_cell_0, x_stem_1) + x_cell_2 = self.cell_2(x_cell_1, x_cell_0) + x_cell_3 = self.cell_3(x_cell_2, x_cell_1) + + x_reduction_cell_0 = self.reduction_cell_0(x_cell_3, x_cell_2) + + x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_3) + x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) + x_cell_8 = self.cell_8(x_cell_7, x_cell_6) + x_cell_9 = self.cell_9(x_cell_8, x_cell_7) + + x_reduction_cell_1 = self.reduction_cell_1(x_cell_9, x_cell_8) + + x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_9) + x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) + x_cell_14 = self.cell_14(x_cell_13, x_cell_12) + x_cell_15 = self.cell_15(x_cell_14, x_cell_13) + + x_cell_15 = self.relu(x_cell_15) + x_cell_15 = F.avg_pool2d( + x_cell_15, + x_cell_15.size()[2:] + ) # global average pool + x_cell_15 = x_cell_15.view(x_cell_15.size(0), -1) + x_cell_15 = self.dropout(x_cell_15) + + return x_cell_15 + + def forward(self, input): + v = self.features(input) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. 
+ + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def nasnetamobile(num_classes, loss='softmax', pretrained=True, **kwargs): + model = NASNetAMobile(num_classes, loss, **kwargs) + if pretrained: + model_url = pretrained_settings['nasnetamobile']['imagenet']['url'] + init_pretrained_weights(model, model_url) + return model diff --git a/feeder/trackers/strongsort/deep/models/osnet.py b/feeder/trackers/strongsort/deep/models/osnet.py new file mode 100644 index 0000000..b77388f --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/osnet.py @@ -0,0 +1,598 @@ +from __future__ import division, absolute_import +import warnings +import torch +from torch import nn +from torch.nn import functional as F + +__all__ = [ + 'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0' +] + +pretrained_urls = { + 'osnet_x1_0': + 'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY', + 'osnet_x0_75': + 'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq', + 'osnet_x0_5': + 'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i', + 'osnet_x0_25': + 'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs', + 'osnet_ibn_x1_0': + 'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l' +} + + +########## +# Basic layers +########## +class ConvLayer(nn.Module): + """Convolution layer (conv + bn + relu).""" + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + groups=1, + IN=False + ): + super(ConvLayer, self).__init__() + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + bias=False, + groups=groups + ) + if IN: + self.bn = nn.InstanceNorm2d(out_channels, affine=True) + else: + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Conv1x1(nn.Module): + """1x1 convolution + bn + relu.""" + + def __init__(self, in_channels, out_channels, stride=1, groups=1): + super(Conv1x1, self).__init__() + self.conv = nn.Conv2d( + in_channels, + out_channels, + 1, + stride=stride, + padding=0, + bias=False, + groups=groups + ) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Conv1x1Linear(nn.Module): + """1x1 convolution + bn (w/o non-linearity).""" + + def __init__(self, in_channels, out_channels, stride=1): + super(Conv1x1Linear, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, 1, stride=stride, padding=0, bias=False + ) + self.bn = nn.BatchNorm2d(out_channels) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class Conv3x3(nn.Module): + """3x3 convolution + bn + relu.""" + + def __init__(self, in_channels, out_channels, stride=1, groups=1): + super(Conv3x3, self).__init__() + self.conv = nn.Conv2d( + in_channels, + out_channels, + 3, + stride=stride, + padding=1, + bias=False, + groups=groups + ) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = 
self.bn(x) + x = self.relu(x) + return x + + +class LightConv3x3(nn.Module): + """Lightweight 3x3 convolution. + + 1x1 (linear) + dw 3x3 (nonlinear). + """ + + def __init__(self, in_channels, out_channels): + super(LightConv3x3, self).__init__() + self.conv1 = nn.Conv2d( + in_channels, out_channels, 1, stride=1, padding=0, bias=False + ) + self.conv2 = nn.Conv2d( + out_channels, + out_channels, + 3, + stride=1, + padding=1, + bias=False, + groups=out_channels + ) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.bn(x) + x = self.relu(x) + return x + + +########## +# Building blocks for omni-scale feature learning +########## +class ChannelGate(nn.Module): + """A mini-network that generates channel-wise gates conditioned on input tensor.""" + + def __init__( + self, + in_channels, + num_gates=None, + return_gates=False, + gate_activation='sigmoid', + reduction=16, + layer_norm=False + ): + super(ChannelGate, self).__init__() + if num_gates is None: + num_gates = in_channels + self.return_gates = return_gates + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.fc1 = nn.Conv2d( + in_channels, + in_channels // reduction, + kernel_size=1, + bias=True, + padding=0 + ) + self.norm1 = None + if layer_norm: + self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1)) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d( + in_channels // reduction, + num_gates, + kernel_size=1, + bias=True, + padding=0 + ) + if gate_activation == 'sigmoid': + self.gate_activation = nn.Sigmoid() + elif gate_activation == 'relu': + self.gate_activation = nn.ReLU(inplace=True) + elif gate_activation == 'linear': + self.gate_activation = None + else: + raise RuntimeError( + "Unknown gate activation: {}".format(gate_activation) + ) + + def forward(self, x): + input = x + x = self.global_avgpool(x) + x = self.fc1(x) + if self.norm1 is not None: + x = self.norm1(x) + x = self.relu(x) + x = self.fc2(x) + if self.gate_activation is not None: + x = self.gate_activation(x) + if self.return_gates: + return x + return input * x + + +class OSBlock(nn.Module): + """Omni-scale feature learning block.""" + + def __init__( + self, + in_channels, + out_channels, + IN=False, + bottleneck_reduction=4, + **kwargs + ): + super(OSBlock, self).__init__() + mid_channels = out_channels // bottleneck_reduction + self.conv1 = Conv1x1(in_channels, mid_channels) + self.conv2a = LightConv3x3(mid_channels, mid_channels) + self.conv2b = nn.Sequential( + LightConv3x3(mid_channels, mid_channels), + LightConv3x3(mid_channels, mid_channels), + ) + self.conv2c = nn.Sequential( + LightConv3x3(mid_channels, mid_channels), + LightConv3x3(mid_channels, mid_channels), + LightConv3x3(mid_channels, mid_channels), + ) + self.conv2d = nn.Sequential( + LightConv3x3(mid_channels, mid_channels), + LightConv3x3(mid_channels, mid_channels), + LightConv3x3(mid_channels, mid_channels), + LightConv3x3(mid_channels, mid_channels), + ) + self.gate = ChannelGate(mid_channels) + self.conv3 = Conv1x1Linear(mid_channels, out_channels) + self.downsample = None + if in_channels != out_channels: + self.downsample = Conv1x1Linear(in_channels, out_channels) + self.IN = None + if IN: + self.IN = nn.InstanceNorm2d(out_channels, affine=True) + + def forward(self, x): + identity = x + x1 = self.conv1(x) + x2a = self.conv2a(x1) + x2b = self.conv2b(x1) + x2c = self.conv2c(x1) + x2d = self.conv2d(x1) + x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d) + x3 
= self.conv3(x2) + if self.downsample is not None: + identity = self.downsample(identity) + out = x3 + identity + if self.IN is not None: + out = self.IN(out) + return F.relu(out) + + +########## +# Network architecture +########## +class OSNet(nn.Module): + """Omni-Scale Network. + + Reference: + - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019. + - Zhou et al. Learning Generalisable Omni-Scale Representations + for Person Re-Identification. TPAMI, 2021. + """ + + def __init__( + self, + num_classes, + blocks, + layers, + channels, + feature_dim=512, + loss='softmax', + IN=False, + **kwargs + ): + super(OSNet, self).__init__() + num_blocks = len(blocks) + assert num_blocks == len(layers) + assert num_blocks == len(channels) - 1 + self.loss = loss + self.feature_dim = feature_dim + + # convolutional backbone + self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN) + self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) + self.conv2 = self._make_layer( + blocks[0], + layers[0], + channels[0], + channels[1], + reduce_spatial_size=True, + IN=IN + ) + self.conv3 = self._make_layer( + blocks[1], + layers[1], + channels[1], + channels[2], + reduce_spatial_size=True + ) + self.conv4 = self._make_layer( + blocks[2], + layers[2], + channels[2], + channels[3], + reduce_spatial_size=False + ) + self.conv5 = Conv1x1(channels[3], channels[3]) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + # fully connected layer + self.fc = self._construct_fc_layer( + self.feature_dim, channels[3], dropout_p=None + ) + # identity classification layer + self.classifier = nn.Linear(self.feature_dim, num_classes) + + self._init_params() + + def _make_layer( + self, + block, + layer, + in_channels, + out_channels, + reduce_spatial_size, + IN=False + ): + layers = [] + + layers.append(block(in_channels, out_channels, IN=IN)) + for i in range(1, layer): + layers.append(block(out_channels, out_channels, IN=IN)) + + if reduce_spatial_size: + layers.append( + nn.Sequential( + Conv1x1(out_channels, out_channels), + nn.AvgPool2d(2, stride=2) + ) + ) + + return nn.Sequential(*layers) + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + if fc_dims is None or fc_dims < 0: + self.feature_dim = input_dim + return None + + if isinstance(fc_dims, int): + fc_dims = [fc_dims] + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.maxpool(x) + x = self.conv2(x) + x = self.conv3(x) + x = self.conv4(x) + x = self.conv5(x) + return x + + def forward(self, x, return_featuremaps=False): + x = self.featuremaps(x) + if return_featuremaps: + return x + v = self.global_avgpool(x) + v = v.view(v.size(0), -1) + if self.fc 
is not None: + v = self.fc(v) + if not self.training: + return v + y = self.classifier(v) + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError("Unsupported loss: {}".format(self.loss)) + + +def init_pretrained_weights(model, key=''): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + import os + import errno + import gdown + from collections import OrderedDict + + def _get_torch_home(): + ENV_TORCH_HOME = 'TORCH_HOME' + ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' + DEFAULT_CACHE_DIR = '~/.cache' + torch_home = os.path.expanduser( + os.getenv( + ENV_TORCH_HOME, + os.path.join( + os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch' + ) + ) + ) + return torch_home + + torch_home = _get_torch_home() + model_dir = os.path.join(torch_home, 'checkpoints') + try: + os.makedirs(model_dir) + except OSError as e: + if e.errno == errno.EEXIST: + # Directory already exists, ignore. + pass + else: + # Unexpected OSError, re-raise. + raise + filename = key + '_imagenet.pth' + cached_file = os.path.join(model_dir, filename) + + if not os.path.exists(cached_file): + gdown.download(pretrained_urls[key], cached_file, quiet=False) + + state_dict = torch.load(cached_file) + model_dict = model.state_dict() + new_state_dict = OrderedDict() + matched_layers, discarded_layers = [], [] + + for k, v in state_dict.items(): + if k.startswith('module.'): + k = k[7:] # discard module. + + if k in model_dict and model_dict[k].size() == v.size(): + new_state_dict[k] = v + matched_layers.append(k) + else: + discarded_layers.append(k) + + model_dict.update(new_state_dict) + model.load_state_dict(model_dict) + + if len(matched_layers) == 0: + warnings.warn( + 'The pretrained weights from "{}" cannot be loaded, ' + 'please check the key names manually ' + '(** ignored and continue **)'.format(cached_file) + ) + else: + print( + 'Successfully loaded imagenet pretrained weights from "{}"'. + format(cached_file) + ) + if len(discarded_layers) > 0: + print( + '** The following layers are discarded ' + 'due to unmatched keys or layer size: {}'. 
+ format(discarded_layers) + ) + + +########## +# Instantiation +########## +def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs): + # standard size (width x1.0) + model = OSNet( + num_classes, + blocks=[OSBlock, OSBlock, OSBlock], + layers=[2, 2, 2], + channels=[64, 256, 384, 512], + loss=loss, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_x1_0') + return model + + +def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs): + # medium size (width x0.75) + model = OSNet( + num_classes, + blocks=[OSBlock, OSBlock, OSBlock], + layers=[2, 2, 2], + channels=[48, 192, 288, 384], + loss=loss, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_x0_75') + return model + + +def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs): + # tiny size (width x0.5) + model = OSNet( + num_classes, + blocks=[OSBlock, OSBlock, OSBlock], + layers=[2, 2, 2], + channels=[32, 128, 192, 256], + loss=loss, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_x0_5') + return model + + +def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs): + # very tiny size (width x0.25) + model = OSNet( + num_classes, + blocks=[OSBlock, OSBlock, OSBlock], + layers=[2, 2, 2], + channels=[16, 64, 96, 128], + loss=loss, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_x0_25') + return model + + +def osnet_ibn_x1_0( + num_classes=1000, pretrained=True, loss='softmax', **kwargs +): + # standard size (width x1.0) + IBN layer + # Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018. + model = OSNet( + num_classes, + blocks=[OSBlock, OSBlock, OSBlock], + layers=[2, 2, 2], + channels=[64, 256, 384, 512], + loss=loss, + IN=True, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_ibn_x1_0') + return model diff --git a/feeder/trackers/strongsort/deep/models/osnet_ain.py b/feeder/trackers/strongsort/deep/models/osnet_ain.py new file mode 100644 index 0000000..3f9f7bd --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/osnet_ain.py @@ -0,0 +1,609 @@ +from __future__ import division, absolute_import +import warnings +import torch +from torch import nn +from torch.nn import functional as F + +__all__ = [ + 'osnet_ain_x1_0', 'osnet_ain_x0_75', 'osnet_ain_x0_5', 'osnet_ain_x0_25' +] + +pretrained_urls = { + 'osnet_ain_x1_0': + 'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo', + 'osnet_ain_x0_75': + 'https://drive.google.com/uc?id=1apy0hpsMypqstfencdH-jKIUEFOW4xoM', + 'osnet_ain_x0_5': + 'https://drive.google.com/uc?id=1KusKvEYyKGDTUBVRxRiz55G31wkihB6l', + 'osnet_ain_x0_25': + 'https://drive.google.com/uc?id=1SxQt2AvmEcgWNhaRb2xC4rP6ZwVDP0Wt' +} + + +########## +# Basic layers +########## +class ConvLayer(nn.Module): + """Convolution layer (conv + bn + relu).""" + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + groups=1, + IN=False + ): + super(ConvLayer, self).__init__() + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + bias=False, + groups=groups + ) + if IN: + self.bn = nn.InstanceNorm2d(out_channels, affine=True) + else: + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return self.relu(x) + + +class Conv1x1(nn.Module): + """1x1 convolution + bn + relu.""" + + def __init__(self, 
in_channels, out_channels, stride=1, groups=1): + super(Conv1x1, self).__init__() + self.conv = nn.Conv2d( + in_channels, + out_channels, + 1, + stride=stride, + padding=0, + bias=False, + groups=groups + ) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return self.relu(x) + + +class Conv1x1Linear(nn.Module): + """1x1 convolution + bn (w/o non-linearity).""" + + def __init__(self, in_channels, out_channels, stride=1, bn=True): + super(Conv1x1Linear, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, 1, stride=stride, padding=0, bias=False + ) + self.bn = None + if bn: + self.bn = nn.BatchNorm2d(out_channels) + + def forward(self, x): + x = self.conv(x) + if self.bn is not None: + x = self.bn(x) + return x + + +class Conv3x3(nn.Module): + """3x3 convolution + bn + relu.""" + + def __init__(self, in_channels, out_channels, stride=1, groups=1): + super(Conv3x3, self).__init__() + self.conv = nn.Conv2d( + in_channels, + out_channels, + 3, + stride=stride, + padding=1, + bias=False, + groups=groups + ) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return self.relu(x) + + +class LightConv3x3(nn.Module): + """Lightweight 3x3 convolution. + + 1x1 (linear) + dw 3x3 (nonlinear). + """ + + def __init__(self, in_channels, out_channels): + super(LightConv3x3, self).__init__() + self.conv1 = nn.Conv2d( + in_channels, out_channels, 1, stride=1, padding=0, bias=False + ) + self.conv2 = nn.Conv2d( + out_channels, + out_channels, + 3, + stride=1, + padding=1, + bias=False, + groups=out_channels + ) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.bn(x) + return self.relu(x) + + +class LightConvStream(nn.Module): + """Lightweight convolution stream.""" + + def __init__(self, in_channels, out_channels, depth): + super(LightConvStream, self).__init__() + assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format( + depth + ) + layers = [] + layers += [LightConv3x3(in_channels, out_channels)] + for i in range(depth - 1): + layers += [LightConv3x3(out_channels, out_channels)] + self.layers = nn.Sequential(*layers) + + def forward(self, x): + return self.layers(x) + + +########## +# Building blocks for omni-scale feature learning +########## +class ChannelGate(nn.Module): + """A mini-network that generates channel-wise gates conditioned on input tensor.""" + + def __init__( + self, + in_channels, + num_gates=None, + return_gates=False, + gate_activation='sigmoid', + reduction=16, + layer_norm=False + ): + super(ChannelGate, self).__init__() + if num_gates is None: + num_gates = in_channels + self.return_gates = return_gates + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.fc1 = nn.Conv2d( + in_channels, + in_channels // reduction, + kernel_size=1, + bias=True, + padding=0 + ) + self.norm1 = None + if layer_norm: + self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1)) + self.relu = nn.ReLU() + self.fc2 = nn.Conv2d( + in_channels // reduction, + num_gates, + kernel_size=1, + bias=True, + padding=0 + ) + if gate_activation == 'sigmoid': + self.gate_activation = nn.Sigmoid() + elif gate_activation == 'relu': + self.gate_activation = nn.ReLU() + elif gate_activation == 'linear': + self.gate_activation = None + else: + raise RuntimeError( + "Unknown gate activation: {}".format(gate_activation) + ) + + def 
forward(self, x): + input = x + x = self.global_avgpool(x) + x = self.fc1(x) + if self.norm1 is not None: + x = self.norm1(x) + x = self.relu(x) + x = self.fc2(x) + if self.gate_activation is not None: + x = self.gate_activation(x) + if self.return_gates: + return x + return input * x + + +class OSBlock(nn.Module): + """Omni-scale feature learning block.""" + + def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs): + super(OSBlock, self).__init__() + assert T >= 1 + assert out_channels >= reduction and out_channels % reduction == 0 + mid_channels = out_channels // reduction + + self.conv1 = Conv1x1(in_channels, mid_channels) + self.conv2 = nn.ModuleList() + for t in range(1, T + 1): + self.conv2 += [LightConvStream(mid_channels, mid_channels, t)] + self.gate = ChannelGate(mid_channels) + self.conv3 = Conv1x1Linear(mid_channels, out_channels) + self.downsample = None + if in_channels != out_channels: + self.downsample = Conv1x1Linear(in_channels, out_channels) + + def forward(self, x): + identity = x + x1 = self.conv1(x) + x2 = 0 + for conv2_t in self.conv2: + x2_t = conv2_t(x1) + x2 = x2 + self.gate(x2_t) + x3 = self.conv3(x2) + if self.downsample is not None: + identity = self.downsample(identity) + out = x3 + identity + return F.relu(out) + + +class OSBlockINin(nn.Module): + """Omni-scale feature learning block with instance normalization.""" + + def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs): + super(OSBlockINin, self).__init__() + assert T >= 1 + assert out_channels >= reduction and out_channels % reduction == 0 + mid_channels = out_channels // reduction + + self.conv1 = Conv1x1(in_channels, mid_channels) + self.conv2 = nn.ModuleList() + for t in range(1, T + 1): + self.conv2 += [LightConvStream(mid_channels, mid_channels, t)] + self.gate = ChannelGate(mid_channels) + self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False) + self.downsample = None + if in_channels != out_channels: + self.downsample = Conv1x1Linear(in_channels, out_channels) + self.IN = nn.InstanceNorm2d(out_channels, affine=True) + + def forward(self, x): + identity = x + x1 = self.conv1(x) + x2 = 0 + for conv2_t in self.conv2: + x2_t = conv2_t(x1) + x2 = x2 + self.gate(x2_t) + x3 = self.conv3(x2) + x3 = self.IN(x3) # IN inside residual + if self.downsample is not None: + identity = self.downsample(identity) + out = x3 + identity + return F.relu(out) + + +########## +# Network architecture +########## +class OSNet(nn.Module): + """Omni-Scale Network. + + Reference: + - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019. + - Zhou et al. Learning Generalisable Omni-Scale Representations + for Person Re-Identification. TPAMI, 2021. 
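+
+    Public keys:
+        - ``osnet_ain_x1_0``: OSNet-AIN, width x1.0.
+        - ``osnet_ain_x0_75``: OSNet-AIN, width x0.75.
+        - ``osnet_ain_x0_5``: OSNet-AIN, width x0.5.
+        - ``osnet_ain_x0_25``: OSNet-AIN, width x0.25.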
+ """ + + def __init__( + self, + num_classes, + blocks, + layers, + channels, + feature_dim=512, + loss='softmax', + conv1_IN=False, + **kwargs + ): + super(OSNet, self).__init__() + num_blocks = len(blocks) + assert num_blocks == len(layers) + assert num_blocks == len(channels) - 1 + self.loss = loss + self.feature_dim = feature_dim + + # convolutional backbone + self.conv1 = ConvLayer( + 3, channels[0], 7, stride=2, padding=3, IN=conv1_IN + ) + self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) + self.conv2 = self._make_layer( + blocks[0], layers[0], channels[0], channels[1] + ) + self.pool2 = nn.Sequential( + Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2) + ) + self.conv3 = self._make_layer( + blocks[1], layers[1], channels[1], channels[2] + ) + self.pool3 = nn.Sequential( + Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2) + ) + self.conv4 = self._make_layer( + blocks[2], layers[2], channels[2], channels[3] + ) + self.conv5 = Conv1x1(channels[3], channels[3]) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + # fully connected layer + self.fc = self._construct_fc_layer( + self.feature_dim, channels[3], dropout_p=None + ) + # identity classification layer + self.classifier = nn.Linear(self.feature_dim, num_classes) + + self._init_params() + + def _make_layer(self, blocks, layer, in_channels, out_channels): + layers = [] + layers += [blocks[0](in_channels, out_channels)] + for i in range(1, len(blocks)): + layers += [blocks[i](out_channels, out_channels)] + return nn.Sequential(*layers) + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + if fc_dims is None or fc_dims < 0: + self.feature_dim = input_dim + return None + + if isinstance(fc_dims, int): + fc_dims = [fc_dims] + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU()) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.InstanceNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.maxpool(x) + x = self.conv2(x) + x = self.pool2(x) + x = self.conv3(x) + x = self.pool3(x) + x = self.conv4(x) + x = self.conv5(x) + return x + + def forward(self, x, return_featuremaps=False): + x = self.featuremaps(x) + if return_featuremaps: + return x + v = self.global_avgpool(x) + v = v.view(v.size(0), -1) + if self.fc is not None: + v = self.fc(v) + if not self.training: + return v + y = self.classifier(v) + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError("Unsupported loss: {}".format(self.loss)) + + +def init_pretrained_weights(model, key=''): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. 
+ """ + import os + import errno + import gdown + from collections import OrderedDict + + def _get_torch_home(): + ENV_TORCH_HOME = 'TORCH_HOME' + ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' + DEFAULT_CACHE_DIR = '~/.cache' + torch_home = os.path.expanduser( + os.getenv( + ENV_TORCH_HOME, + os.path.join( + os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch' + ) + ) + ) + return torch_home + + torch_home = _get_torch_home() + model_dir = os.path.join(torch_home, 'checkpoints') + try: + os.makedirs(model_dir) + except OSError as e: + if e.errno == errno.EEXIST: + # Directory already exists, ignore. + pass + else: + # Unexpected OSError, re-raise. + raise + filename = key + '_imagenet.pth' + cached_file = os.path.join(model_dir, filename) + + if not os.path.exists(cached_file): + gdown.download(pretrained_urls[key], cached_file, quiet=False) + + state_dict = torch.load(cached_file) + model_dict = model.state_dict() + new_state_dict = OrderedDict() + matched_layers, discarded_layers = [], [] + + for k, v in state_dict.items(): + if k.startswith('module.'): + k = k[7:] # discard module. + + if k in model_dict and model_dict[k].size() == v.size(): + new_state_dict[k] = v + matched_layers.append(k) + else: + discarded_layers.append(k) + + model_dict.update(new_state_dict) + model.load_state_dict(model_dict) + + if len(matched_layers) == 0: + warnings.warn( + 'The pretrained weights from "{}" cannot be loaded, ' + 'please check the key names manually ' + '(** ignored and continue **)'.format(cached_file) + ) + else: + print( + 'Successfully loaded imagenet pretrained weights from "{}"'. + format(cached_file) + ) + if len(discarded_layers) > 0: + print( + '** The following layers are discarded ' + 'due to unmatched keys or layer size: {}'. + format(discarded_layers) + ) + + +########## +# Instantiation +########## +def osnet_ain_x1_0( + num_classes=1000, pretrained=True, loss='softmax', **kwargs +): + model = OSNet( + num_classes, + blocks=[ + [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], + [OSBlockINin, OSBlock] + ], + layers=[2, 2, 2], + channels=[64, 256, 384, 512], + loss=loss, + conv1_IN=True, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_ain_x1_0') + return model + + +def osnet_ain_x0_75( + num_classes=1000, pretrained=True, loss='softmax', **kwargs +): + model = OSNet( + num_classes, + blocks=[ + [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], + [OSBlockINin, OSBlock] + ], + layers=[2, 2, 2], + channels=[48, 192, 288, 384], + loss=loss, + conv1_IN=True, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_ain_x0_75') + return model + + +def osnet_ain_x0_5( + num_classes=1000, pretrained=True, loss='softmax', **kwargs +): + model = OSNet( + num_classes, + blocks=[ + [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], + [OSBlockINin, OSBlock] + ], + layers=[2, 2, 2], + channels=[32, 128, 192, 256], + loss=loss, + conv1_IN=True, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_ain_x0_5') + return model + + +def osnet_ain_x0_25( + num_classes=1000, pretrained=True, loss='softmax', **kwargs +): + model = OSNet( + num_classes, + blocks=[ + [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], + [OSBlockINin, OSBlock] + ], + layers=[2, 2, 2], + channels=[16, 64, 96, 128], + loss=loss, + conv1_IN=True, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, key='osnet_ain_x0_25') + return model diff --git a/feeder/trackers/strongsort/deep/models/pcb.py b/feeder/trackers/strongsort/deep/models/pcb.py new 
file mode 100644 index 0000000..92c7414 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/pcb.py @@ -0,0 +1,314 @@ +from __future__ import division, absolute_import +import torch.utils.model_zoo as model_zoo +from torch import nn +from torch.nn import functional as F + +__all__ = ['pcb_p6', 'pcb_p4'] + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False + ) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, planes * self.expansion, kernel_size=1, bias=False + ) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class DimReduceLayer(nn.Module): + + def __init__(self, in_channels, out_channels, nonlinear): + super(DimReduceLayer, self).__init__() + layers = [] + layers.append( + nn.Conv2d( + in_channels, out_channels, 1, stride=1, padding=0, bias=False + ) + ) + layers.append(nn.BatchNorm2d(out_channels)) + + if nonlinear == 'relu': + layers.append(nn.ReLU(inplace=True)) + elif nonlinear == 'leakyrelu': + layers.append(nn.LeakyReLU(0.1)) + + self.layers = nn.Sequential(*layers) + + def forward(self, x): + return self.layers(x) + + +class PCB(nn.Module): + """Part-based Convolutional Baseline. + + Reference: + Sun et al. Beyond Part Models: Person Retrieval with Refined + Part Pooling (and A Strong Convolutional Baseline). ECCV 2018. + + Public keys: + - ``pcb_p4``: PCB with 4-part strips. + - ``pcb_p6``: PCB with 6-part strips. 
+ """ + + def __init__( + self, + num_classes, + loss, + block, + layers, + parts=6, + reduced_dim=256, + nonlinear='relu', + **kwargs + ): + self.inplanes = 64 + super(PCB, self).__init__() + self.loss = loss + self.parts = parts + self.feature_dim = 512 * block.expansion + + # backbone network + self.conv1 = nn.Conv2d( + 3, 64, kernel_size=7, stride=2, padding=3, bias=False + ) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=1) + + # pcb layers + self.parts_avgpool = nn.AdaptiveAvgPool2d((self.parts, 1)) + self.dropout = nn.Dropout(p=0.5) + self.conv5 = DimReduceLayer( + 512 * block.expansion, reduced_dim, nonlinear=nonlinear + ) + self.feature_dim = reduced_dim + self.classifier = nn.ModuleList( + [ + nn.Linear(self.feature_dim, num_classes) + for _ in range(self.parts) + ] + ) + + self._init_params() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False + ), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + f = self.featuremaps(x) + v_g = self.parts_avgpool(f) + + if not self.training: + v_g = F.normalize(v_g, p=2, dim=1) + return v_g.view(v_g.size(0), -1) + + v_g = self.dropout(v_g) + v_h = self.conv5(v_g) + + y = [] + for i in range(self.parts): + v_h_i = v_h[:, :, i, :] + v_h_i = v_h_i.view(v_h_i.size(0), -1) + y_i = self.classifier[i](v_h_i) + y.append(y_i) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + v_g = F.normalize(v_g, p=2, dim=1) + return y, v_g.view(v_g.size(0), -1) + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. 
+ """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def pcb_p6(num_classes, loss='softmax', pretrained=True, **kwargs): + model = PCB( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 4, 6, 3], + last_stride=1, + parts=6, + reduced_dim=256, + nonlinear='relu', + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet50']) + return model + + +def pcb_p4(num_classes, loss='softmax', pretrained=True, **kwargs): + model = PCB( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 4, 6, 3], + last_stride=1, + parts=4, + reduced_dim=256, + nonlinear='relu', + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet50']) + return model diff --git a/feeder/trackers/strongsort/deep/models/resnet.py b/feeder/trackers/strongsort/deep/models/resnet.py new file mode 100644 index 0000000..63d7f43 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/resnet.py @@ -0,0 +1,530 @@ +""" +Code source: https://github.com/pytorch/vision +""" +from __future__ import division, absolute_import +import torch.utils.model_zoo as model_zoo +from torch import nn + +__all__ = [ + 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', + 'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512' +] + +model_urls = { + 'resnet18': + 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': + 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': + 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': + 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': + 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', + 'resnext50_32x4d': + 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': + 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=dilation, + groups=groups, + bias=False, + dilation=dilation + ) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d( + in_planes, out_planes, kernel_size=1, stride=stride, bias=False + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + groups=1, + base_width=64, + dilation=1, + norm_layer=None + ): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError( + 'BasicBlock only supports groups=1 and base_width=64' + ) + if dilation > 1: + raise NotImplementedError( + "Dilation > 1 not supported in BasicBlock" + ) + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if 
self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + groups=1, + base_width=64, + dilation=1, + norm_layer=None + ): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width/64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + """Residual network. + + Reference: + - He et al. Deep Residual Learning for Image Recognition. CVPR 2016. + - Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017. + + Public keys: + - ``resnet18``: ResNet18. + - ``resnet34``: ResNet34. + - ``resnet50``: ResNet50. + - ``resnet101``: ResNet101. + - ``resnet152``: ResNet152. + - ``resnext50_32x4d``: ResNeXt50. + - ``resnext101_32x8d``: ResNeXt101. + - ``resnet50_fc512``: ResNet50 + FC. + """ + + def __init__( + self, + num_classes, + loss, + block, + layers, + zero_init_residual=False, + groups=1, + width_per_group=64, + replace_stride_with_dilation=None, + norm_layer=None, + last_stride=2, + fc_dims=None, + dropout_p=None, + **kwargs + ): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + self.loss = loss + self.feature_dim = 512 * block.expansion + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError( + "replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}". 
+ format(replace_stride_with_dilation) + ) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d( + 3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False + ) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer( + block, + 128, + layers[1], + stride=2, + dilate=replace_stride_with_dilation[0] + ) + self.layer3 = self._make_layer( + block, + 256, + layers[2], + stride=2, + dilate=replace_stride_with_dilation[1] + ) + self.layer4 = self._make_layer( + block, + 512, + layers[3], + stride=last_stride, + dilate=replace_stride_with_dilation[2] + ) + self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = self._construct_fc_layer( + fc_dims, 512 * block.expansion, dropout_p + ) + self.classifier = nn.Linear(self.feature_dim, num_classes) + + self._init_params() + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append( + block( + self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer + ) + ) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block( + self.inplanes, + planes, + groups=self.groups, + base_width=self.base_width, + dilation=self.dilation, + norm_layer=norm_layer + ) + ) + + return nn.Sequential(*layers) + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """Constructs fully connected layer + + Args: + fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed + input_dim (int): input dimension + dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + 
nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + f = self.featuremaps(x) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if self.fc is not None: + v = self.fc(v) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError("Unsupported loss: {}".format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +"""ResNet""" + + +def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNet( + num_classes=num_classes, + loss=loss, + block=BasicBlock, + layers=[2, 2, 2, 2], + last_stride=2, + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet18']) + return model + + +def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNet( + num_classes=num_classes, + loss=loss, + block=BasicBlock, + layers=[3, 4, 6, 3], + last_stride=2, + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet34']) + return model + + +def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNet( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 4, 6, 3], + last_stride=2, + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet50']) + return model + + +def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNet( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 4, 23, 3], + last_stride=2, + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet101']) + return model + + +def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNet( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 8, 36, 3], + last_stride=2, + fc_dims=None, + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet152']) + return model + + +"""ResNeXt""" + + +def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNet( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 4, 6, 3], + last_stride=2, + fc_dims=None, + dropout_p=None, + groups=32, + width_per_group=4, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnext50_32x4d']) + return model + + +def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNet( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 4, 23, 3], + last_stride=2, + fc_dims=None, + dropout_p=None, + groups=32, + width_per_group=8, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, 
model_urls['resnext101_32x8d']) + return model + + +""" +ResNet + FC +""" + + +def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNet( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 4, 6, 3], + last_stride=1, + fc_dims=[512], + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet50']) + return model diff --git a/feeder/trackers/strongsort/deep/models/resnet_ibn_a.py b/feeder/trackers/strongsort/deep/models/resnet_ibn_a.py new file mode 100644 index 0000000..d198e7c --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/resnet_ibn_a.py @@ -0,0 +1,289 @@ +""" +Credit to https://github.com/XingangPan/IBN-Net. +""" +from __future__ import division, absolute_import +import math +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['resnet50_ibn_a'] + +model_urls = { + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class IBN(nn.Module): + + def __init__(self, planes): + super(IBN, self).__init__() + half1 = int(planes / 2) + self.half = half1 + half2 = planes - half1 + self.IN = nn.InstanceNorm2d(half1, affine=True) + self.BN = nn.BatchNorm2d(half2) + + def forward(self, x): + split = torch.split(x, self.half, 1) + out1 = self.IN(split[0].contiguous()) + out2 = self.BN(split[1].contiguous()) + out = torch.cat((out1, out2), 1) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, ibn=False, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + if ibn: + self.bn1 = IBN(planes) + else: + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False + ) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, planes * self.expansion, kernel_size=1, bias=False + ) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class 
ResNet(nn.Module): + """Residual network + IBN layer. + + Reference: + - He et al. Deep Residual Learning for Image Recognition. CVPR 2016. + - Pan et al. Two at Once: Enhancing Learning and Generalization + Capacities via IBN-Net. ECCV 2018. + """ + + def __init__( + self, + block, + layers, + num_classes=1000, + loss='softmax', + fc_dims=None, + dropout_p=None, + **kwargs + ): + scale = 64 + self.inplanes = scale + super(ResNet, self).__init__() + self.loss = loss + self.feature_dim = scale * 8 * block.expansion + + self.conv1 = nn.Conv2d( + 3, scale, kernel_size=7, stride=2, padding=3, bias=False + ) + self.bn1 = nn.BatchNorm2d(scale) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, scale, layers[0]) + self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2) + self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2) + self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = self._construct_fc_layer( + fc_dims, scale * 8 * block.expansion, dropout_p + ) + self.classifier = nn.Linear(self.feature_dim, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.InstanceNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False + ), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + ibn = True + if planes == 512: + ibn = False + layers.append(block(self.inplanes, planes, ibn, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, ibn)) + + return nn.Sequential(*layers) + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """Constructs fully connected layer + + Args: + fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed + input_dim (int): input dimension + dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + f = self.featuremaps(x) + v = self.avgpool(f) + v = v.view(v.size(0), -1) + if self.fc is not None: + v = self.fc(v) + if not self.training: + return v + y = self.classifier(v) + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError("Unsupported loss: 
{}".format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def resnet50_ibn_a(num_classes, loss='softmax', pretrained=False, **kwargs): + model = ResNet( + Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet50']) + return model diff --git a/feeder/trackers/strongsort/deep/models/resnet_ibn_b.py b/feeder/trackers/strongsort/deep/models/resnet_ibn_b.py new file mode 100644 index 0000000..9881cc7 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/resnet_ibn_b.py @@ -0,0 +1,274 @@ +""" +Credit to https://github.com/XingangPan/IBN-Net. +""" +from __future__ import division, absolute_import +import math +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['resnet50_ibn_b'] + +model_urls = { + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, IN=False): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False + ) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, planes * self.expansion, kernel_size=1, bias=False + ) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.IN = None + if IN: + self.IN = nn.InstanceNorm2d(planes * 4, affine=True) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + if self.IN is not None: + out = self.IN(out) + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + 
"""Residual network + IBN layer. + + Reference: + - He et al. Deep Residual Learning for Image Recognition. CVPR 2016. + - Pan et al. Two at Once: Enhancing Learning and Generalization + Capacities via IBN-Net. ECCV 2018. + """ + + def __init__( + self, + block, + layers, + num_classes=1000, + loss='softmax', + fc_dims=None, + dropout_p=None, + **kwargs + ): + scale = 64 + self.inplanes = scale + super(ResNet, self).__init__() + self.loss = loss + self.feature_dim = scale * 8 * block.expansion + + self.conv1 = nn.Conv2d( + 3, scale, kernel_size=7, stride=2, padding=3, bias=False + ) + self.bn1 = nn.InstanceNorm2d(scale, affine=True) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer( + block, scale, layers[0], stride=1, IN=True + ) + self.layer2 = self._make_layer( + block, scale * 2, layers[1], stride=2, IN=True + ) + self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2) + self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = self._construct_fc_layer( + fc_dims, scale * 8 * block.expansion, dropout_p + ) + self.classifier = nn.Linear(self.feature_dim, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.InstanceNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1, IN=False): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False + ), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks - 1): + layers.append(block(self.inplanes, planes)) + layers.append(block(self.inplanes, planes, IN=IN)) + + return nn.Sequential(*layers) + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """Constructs fully connected layer + + Args: + fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed + input_dim (int): input dimension + dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + f = self.featuremaps(x) + v = self.avgpool(f) + v = v.view(v.size(0), -1) + if self.fc is not None: + v = self.fc(v) + if not self.training: + return v + y = self.classifier(v) + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + 
else: + raise KeyError("Unsupported loss: {}".format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def resnet50_ibn_b(num_classes, loss='softmax', pretrained=False, **kwargs): + model = ResNet( + Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet50']) + return model diff --git a/feeder/trackers/strongsort/deep/models/resnetmid.py b/feeder/trackers/strongsort/deep/models/resnetmid.py new file mode 100644 index 0000000..017f6c6 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/resnetmid.py @@ -0,0 +1,307 @@ +from __future__ import division, absolute_import +import torch +import torch.utils.model_zoo as model_zoo +from torch import nn + +__all__ = ['resnet50mid'] + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False + ) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, planes * self.expansion, kernel_size=1, bias=False + ) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNetMid(nn.Module): + """Residual network 
+ mid-level features. + + Reference: + Yu et al. The Devil is in the Middle: Exploiting Mid-level Representations for + Cross-Domain Instance Matching. arXiv:1711.08106. + + Public keys: + - ``resnet50mid``: ResNet50 + mid-level feature fusion. + """ + + def __init__( + self, + num_classes, + loss, + block, + layers, + last_stride=2, + fc_dims=None, + **kwargs + ): + self.inplanes = 64 + super(ResNetMid, self).__init__() + self.loss = loss + self.feature_dim = 512 * block.expansion + + # backbone network + self.conv1 = nn.Conv2d( + 3, 64, kernel_size=7, stride=2, padding=3, bias=False + ) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer( + block, 512, layers[3], stride=last_stride + ) + + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + assert fc_dims is not None + self.fc_fusion = self._construct_fc_layer( + fc_dims, 512 * block.expansion * 2 + ) + self.feature_dim += 512 * block.expansion + self.classifier = nn.Linear(self.feature_dim, num_classes) + + self._init_params() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False + ), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """Constructs fully connected layer + + Args: + fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed + input_dim (int): input dimension + dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x4a = self.layer4[0](x) + x4b = self.layer4[1](x4a) + x4c = self.layer4[2](x4b) + return x4a, x4b, x4c + + def forward(self, x): + x4a, x4b, 
x4c = self.featuremaps(x) + + v4a = self.global_avgpool(x4a) + v4b = self.global_avgpool(x4b) + v4c = self.global_avgpool(x4c) + v4ab = torch.cat([v4a, v4b], 1) + v4ab = v4ab.view(v4ab.size(0), -1) + v4ab = self.fc_fusion(v4ab) + v4c = v4c.view(v4c.size(0), -1) + v = torch.cat([v4ab, v4c], 1) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +""" +Residual network configurations: +-- +resnet18: block=BasicBlock, layers=[2, 2, 2, 2] +resnet34: block=BasicBlock, layers=[3, 4, 6, 3] +resnet50: block=Bottleneck, layers=[3, 4, 6, 3] +resnet101: block=Bottleneck, layers=[3, 4, 23, 3] +resnet152: block=Bottleneck, layers=[3, 8, 36, 3] +""" + + +def resnet50mid(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ResNetMid( + num_classes=num_classes, + loss=loss, + block=Bottleneck, + layers=[3, 4, 6, 3], + last_stride=2, + fc_dims=[1024], + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['resnet50']) + return model diff --git a/feeder/trackers/strongsort/deep/models/senet.py b/feeder/trackers/strongsort/deep/models/senet.py new file mode 100644 index 0000000..baaf9b0 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/senet.py @@ -0,0 +1,688 @@ +from __future__ import division, absolute_import +import math +from collections import OrderedDict +import torch.nn as nn +from torch.utils import model_zoo + +__all__ = [ + 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', + 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnet50_fc512' +] +""" +Code imported from https://github.com/Cadene/pretrained-models.pytorch +""" + +pretrained_settings = { + 'senet154': { + 'imagenet': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnet50': { + 'imagenet': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnet101': { + 'imagenet': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnet152': { + 'imagenet': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnext50_32x4d': { + 'imagenet': { + 'url': + 
'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnext101_32x4d': { + 'imagenet': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, +} + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc1 = nn.Conv2d( + channels, channels // reduction, kernel_size=1, padding=0 + ) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d( + channels // reduction, channels, kernel_size=1, padding=0 + ) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + module_input = x + x = self.avg_pool(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class Bottleneck(nn.Module): + """ + Base class for bottlenecks that implements `forward()` method. + """ + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out = self.se_module(out) + residual + out = self.relu(out) + + return out + + +class SEBottleneck(Bottleneck): + """ + Bottleneck for SENet154. + """ + expansion = 4 + + def __init__( + self, inplanes, planes, groups, reduction, stride=1, downsample=None + ): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes * 2) + self.conv2 = nn.Conv2d( + planes * 2, + planes * 4, + kernel_size=3, + stride=stride, + padding=1, + groups=groups, + bias=False + ) + self.bn2 = nn.BatchNorm2d(planes * 4) + self.conv3 = nn.Conv2d( + planes * 4, planes * 4, kernel_size=1, bias=False + ) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBottleneck(Bottleneck): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `stride=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). 
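A short sketch of the stride placement described above; the channel widths, reduction ratio, input size, and downsample branch are assumptions chosen only to make the shapes line up, and the block class is the one defined in this file:

import torch
from torch import nn

# Caffe-style block: the stride sits on the 1x1 conv1 rather than on the 3x3
# conv2 (torchvision's choice); either way a stride-2 block halves the spatial size.
downsample = nn.Sequential(
    nn.Conv2d(256, 512, kernel_size=1, stride=2, bias=False),
    nn.BatchNorm2d(512),
)
block = SEResNetBottleneck(  # assumes the class from this module is in scope
    inplanes=256, planes=128, groups=1, reduction=16, stride=2, downsample=downsample
)
out = block(torch.randn(1, 256, 56, 56))   # -> torch.Size([1, 512, 28, 28])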
+ """ + expansion = 4 + + def __init__( + self, inplanes, planes, groups, reduction, stride=1, downsample=None + ): + super(SEResNetBottleneck, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=1, bias=False, stride=stride + ) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, + planes, + kernel_size=3, + padding=1, + groups=groups, + bias=False + ) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNeXtBottleneck(Bottleneck): + """ResNeXt bottleneck type C with a Squeeze-and-Excitation module""" + expansion = 4 + + def __init__( + self, + inplanes, + planes, + groups, + reduction, + stride=1, + downsample=None, + base_width=4 + ): + super(SEResNeXtBottleneck, self).__init__() + width = int(math.floor(planes * (base_width/64.)) * groups) + self.conv1 = nn.Conv2d( + inplanes, width, kernel_size=1, bias=False, stride=1 + ) + self.bn1 = nn.BatchNorm2d(width) + self.conv2 = nn.Conv2d( + width, + width, + kernel_size=3, + stride=stride, + padding=1, + groups=groups, + bias=False + ) + self.bn2 = nn.BatchNorm2d(width) + self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SENet(nn.Module): + """Squeeze-and-excitation network. + + Reference: + Hu et al. Squeeze-and-Excitation Networks. CVPR 2018. + + Public keys: + - ``senet154``: SENet154. + - ``se_resnet50``: ResNet50 + SE. + - ``se_resnet101``: ResNet101 + SE. + - ``se_resnet152``: ResNet152 + SE. + - ``se_resnext50_32x4d``: ResNeXt50 (groups=32, width=4) + SE. + - ``se_resnext101_32x4d``: ResNeXt101 (groups=32, width=4) + SE. + - ``se_resnet50_fc512``: (ResNet50 + SE) + FC. + """ + + def __init__( + self, + num_classes, + loss, + block, + layers, + groups, + reduction, + dropout_p=0.2, + inplanes=128, + input_3x3=True, + downsample_kernel_size=3, + downsample_padding=1, + last_stride=2, + fc_dims=None, + **kwargs + ): + """ + Parameters + ---------- + block (nn.Module): Bottleneck class. + - For SENet154: SEBottleneck + - For SE-ResNet models: SEResNetBottleneck + - For SE-ResNeXt models: SEResNeXtBottleneck + layers (list of ints): Number of residual blocks for 4 layers of the + network (layer1...layer4). + groups (int): Number of groups for the 3x3 convolution in each + bottleneck block. + - For SENet154: 64 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 32 + reduction (int): Reduction ratio for Squeeze-and-Excitation modules. + - For all models: 16 + dropout_p (float or None): Drop probability for the Dropout layer. + If `None` the Dropout layer is not used. + - For SENet154: 0.2 + - For SE-ResNet models: None + - For SE-ResNeXt models: None + inplanes (int): Number of input channels for layer1. + - For SENet154: 128 + - For SE-ResNet models: 64 + - For SE-ResNeXt models: 64 + input_3x3 (bool): If `True`, use three 3x3 convolutions instead of + a single 7x7 convolution in layer0. + - For SENet154: True + - For SE-ResNet models: False + - For SE-ResNeXt models: False + downsample_kernel_size (int): Kernel size for downsampling convolutions + in layer2, layer3 and layer4. 
+ - For SENet154: 3 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 1 + downsample_padding (int): Padding for downsampling convolutions in + layer2, layer3 and layer4. + - For SENet154: 1 + - For SE-ResNet models: 0 + - For SE-ResNeXt models: 0 + num_classes (int): Number of outputs in `classifier` layer. + """ + super(SENet, self).__init__() + self.inplanes = inplanes + self.loss = loss + + if input_3x3: + layer0_modules = [ + ( + 'conv1', + nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False) + ), + ('bn1', nn.BatchNorm2d(64)), + ('relu1', nn.ReLU(inplace=True)), + ( + 'conv2', + nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False) + ), + ('bn2', nn.BatchNorm2d(64)), + ('relu2', nn.ReLU(inplace=True)), + ( + 'conv3', + nn.Conv2d( + 64, inplanes, 3, stride=1, padding=1, bias=False + ) + ), + ('bn3', nn.BatchNorm2d(inplanes)), + ('relu3', nn.ReLU(inplace=True)), + ] + else: + layer0_modules = [ + ( + 'conv1', + nn.Conv2d( + 3, + inplanes, + kernel_size=7, + stride=2, + padding=3, + bias=False + ) + ), + ('bn1', nn.BatchNorm2d(inplanes)), + ('relu1', nn.ReLU(inplace=True)), + ] + # To preserve compatibility with Caffe weights `ceil_mode=True` + # is used instead of `padding=1`. + layer0_modules.append( + ('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)) + ) + self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) + self.layer1 = self._make_layer( + block, + planes=64, + blocks=layers[0], + groups=groups, + reduction=reduction, + downsample_kernel_size=1, + downsample_padding=0 + ) + self.layer2 = self._make_layer( + block, + planes=128, + blocks=layers[1], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.layer3 = self._make_layer( + block, + planes=256, + blocks=layers[2], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.layer4 = self._make_layer( + block, + planes=512, + blocks=layers[3], + stride=last_stride, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = self._construct_fc_layer( + fc_dims, 512 * block.expansion, dropout_p + ) + self.classifier = nn.Linear(self.feature_dim, num_classes) + + def _make_layer( + self, + block, + planes, + blocks, + groups, + reduction, + stride=1, + downsample_kernel_size=1, + downsample_padding=0 + ): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, + planes * block.expansion, + kernel_size=downsample_kernel_size, + stride=stride, + padding=downsample_padding, + bias=False + ), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append( + block( + self.inplanes, planes, groups, reduction, stride, downsample + ) + ) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups, reduction)) + + return nn.Sequential(*layers) + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """ + Construct fully connected layer + + - fc_dims (list or tuple): dimensions of fc layers, if None, + no fc layers are constructed + - input_dim (int): input dimension + - dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert 
isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def featuremaps(self, x): + x = self.layer0(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + f = self.featuremaps(x) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if self.fc is not None: + v = self.fc(v) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError("Unsupported loss: {}".format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. + """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def senet154(num_classes, loss='softmax', pretrained=True, **kwargs): + model = SENet( + num_classes=num_classes, + loss=loss, + block=SEBottleneck, + layers=[3, 8, 36, 3], + groups=64, + reduction=16, + dropout_p=0.2, + last_stride=2, + fc_dims=None, + **kwargs + ) + if pretrained: + model_url = pretrained_settings['senet154']['imagenet']['url'] + init_pretrained_weights(model, model_url) + return model + + +def se_resnet50(num_classes, loss='softmax', pretrained=True, **kwargs): + model = SENet( + num_classes=num_classes, + loss=loss, + block=SEResNetBottleneck, + layers=[3, 4, 6, 3], + groups=1, + reduction=16, + dropout_p=None, + inplanes=64, + input_3x3=False, + downsample_kernel_size=1, + downsample_padding=0, + last_stride=2, + fc_dims=None, + **kwargs + ) + if pretrained: + model_url = pretrained_settings['se_resnet50']['imagenet']['url'] + init_pretrained_weights(model, model_url) + return model + + +def se_resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs): + model = SENet( + num_classes=num_classes, + loss=loss, + block=SEResNetBottleneck, + layers=[3, 4, 6, 3], + groups=1, + reduction=16, + dropout_p=None, + inplanes=64, + input_3x3=False, + downsample_kernel_size=1, + downsample_padding=0, + last_stride=1, + fc_dims=[512], + **kwargs + ) + if pretrained: + model_url = pretrained_settings['se_resnet50']['imagenet']['url'] + init_pretrained_weights(model, model_url) + return model + + +def se_resnet101(num_classes, loss='softmax', pretrained=True, **kwargs): + model = SENet( + num_classes=num_classes, + loss=loss, + block=SEResNetBottleneck, + layers=[3, 4, 23, 3], + groups=1, + reduction=16, + dropout_p=None, + inplanes=64, + input_3x3=False, + downsample_kernel_size=1, + downsample_padding=0, + last_stride=2, + fc_dims=None, + **kwargs + ) + if pretrained: + model_url = pretrained_settings['se_resnet101']['imagenet']['url'] + init_pretrained_weights(model, model_url) + return model + + +def se_resnet152(num_classes, loss='softmax', pretrained=True, **kwargs): + model = SENet( + num_classes=num_classes, + loss=loss, + block=SEResNetBottleneck, + layers=[3, 
8, 36, 3], + groups=1, + reduction=16, + dropout_p=None, + inplanes=64, + input_3x3=False, + downsample_kernel_size=1, + downsample_padding=0, + last_stride=2, + fc_dims=None, + **kwargs + ) + if pretrained: + model_url = pretrained_settings['se_resnet152']['imagenet']['url'] + init_pretrained_weights(model, model_url) + return model + + +def se_resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs): + model = SENet( + num_classes=num_classes, + loss=loss, + block=SEResNeXtBottleneck, + layers=[3, 4, 6, 3], + groups=32, + reduction=16, + dropout_p=None, + inplanes=64, + input_3x3=False, + downsample_kernel_size=1, + downsample_padding=0, + last_stride=2, + fc_dims=None, + **kwargs + ) + if pretrained: + model_url = pretrained_settings['se_resnext50_32x4d']['imagenet']['url' + ] + init_pretrained_weights(model, model_url) + return model + + +def se_resnext101_32x4d( + num_classes, loss='softmax', pretrained=True, **kwargs +): + model = SENet( + num_classes=num_classes, + loss=loss, + block=SEResNeXtBottleneck, + layers=[3, 4, 23, 3], + groups=32, + reduction=16, + dropout_p=None, + inplanes=64, + input_3x3=False, + downsample_kernel_size=1, + downsample_padding=0, + last_stride=2, + fc_dims=None, + **kwargs + ) + if pretrained: + model_url = pretrained_settings['se_resnext101_32x4d']['imagenet'][ + 'url'] + init_pretrained_weights(model, model_url) + return model diff --git a/feeder/trackers/strongsort/deep/models/shufflenet.py b/feeder/trackers/strongsort/deep/models/shufflenet.py new file mode 100644 index 0000000..bc4d34f --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/shufflenet.py @@ -0,0 +1,198 @@ +from __future__ import division, absolute_import +import torch +import torch.utils.model_zoo as model_zoo +from torch import nn +from torch.nn import functional as F + +__all__ = ['shufflenet'] + +model_urls = { + # training epoch = 90, top1 = 61.8 + 'imagenet': + 'https://mega.nz/#!RDpUlQCY!tr_5xBEkelzDjveIYBBcGcovNCOrgfiJO9kiidz9fZM', +} + + +class ChannelShuffle(nn.Module): + + def __init__(self, num_groups): + super(ChannelShuffle, self).__init__() + self.g = num_groups + + def forward(self, x): + b, c, h, w = x.size() + n = c // self.g + # reshape + x = x.view(b, self.g, n, h, w) + # transpose + x = x.permute(0, 2, 1, 3, 4).contiguous() + # flatten + x = x.view(b, c, h, w) + return x + + +class Bottleneck(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + stride, + num_groups, + group_conv1x1=True + ): + super(Bottleneck, self).__init__() + assert stride in [1, 2], 'Warning: stride must be either 1 or 2' + self.stride = stride + mid_channels = out_channels // 4 + if stride == 2: + out_channels -= in_channels + # group conv is not applied to first conv1x1 at stage 2 + num_groups_conv1x1 = num_groups if group_conv1x1 else 1 + self.conv1 = nn.Conv2d( + in_channels, + mid_channels, + 1, + groups=num_groups_conv1x1, + bias=False + ) + self.bn1 = nn.BatchNorm2d(mid_channels) + self.shuffle1 = ChannelShuffle(num_groups) + self.conv2 = nn.Conv2d( + mid_channels, + mid_channels, + 3, + stride=stride, + padding=1, + groups=mid_channels, + bias=False + ) + self.bn2 = nn.BatchNorm2d(mid_channels) + self.conv3 = nn.Conv2d( + mid_channels, out_channels, 1, groups=num_groups, bias=False + ) + self.bn3 = nn.BatchNorm2d(out_channels) + if stride == 2: + self.shortcut = nn.AvgPool2d(3, stride=2, padding=1) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.shuffle1(out) + out = self.bn2(self.conv2(out)) + out = 
self.bn3(self.conv3(out)) + if self.stride == 2: + res = self.shortcut(x) + out = F.relu(torch.cat([res, out], 1)) + else: + out = F.relu(x + out) + return out + + +# configuration of (num_groups: #out_channels) based on Table 1 in the paper +cfg = { + 1: [144, 288, 576], + 2: [200, 400, 800], + 3: [240, 480, 960], + 4: [272, 544, 1088], + 8: [384, 768, 1536], +} + + +class ShuffleNet(nn.Module): + """ShuffleNet. + + Reference: + Zhang et al. ShuffleNet: An Extremely Efficient Convolutional Neural + Network for Mobile Devices. CVPR 2018. + + Public keys: + - ``shufflenet``: ShuffleNet (groups=3). + """ + + def __init__(self, num_classes, loss='softmax', num_groups=3, **kwargs): + super(ShuffleNet, self).__init__() + self.loss = loss + + self.conv1 = nn.Sequential( + nn.Conv2d(3, 24, 3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(24), + nn.ReLU(), + nn.MaxPool2d(3, stride=2, padding=1), + ) + + self.stage2 = nn.Sequential( + Bottleneck( + 24, cfg[num_groups][0], 2, num_groups, group_conv1x1=False + ), + Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups), + Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups), + Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups), + ) + + self.stage3 = nn.Sequential( + Bottleneck(cfg[num_groups][0], cfg[num_groups][1], 2, num_groups), + Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), + Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), + Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), + Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), + Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), + Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), + Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), + ) + + self.stage4 = nn.Sequential( + Bottleneck(cfg[num_groups][1], cfg[num_groups][2], 2, num_groups), + Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups), + Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups), + Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups), + ) + + self.classifier = nn.Linear(cfg[num_groups][2], num_classes) + self.feat_dim = cfg[num_groups][2] + + def forward(self, x): + x = self.conv1(x) + x = self.stage2(x) + x = self.stage3(x) + x = self.stage4(x) + x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), -1) + + if not self.training: + return x + + y = self.classifier(x) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, x + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. 
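+
+    Illustrative sketch (the URL is a placeholder rather than a real checkpoint,
+    and 751 is just an arbitrary identity count)::
+
+        >>> model = ShuffleNet(num_classes=751)
+        >>> init_pretrained_weights(model, 'https://example.com/shufflenet_imagenet.pth')
+        >>> # only parameters whose names and shapes match are copied over;
+        >>> # the 751-way classifier keeps its random initialization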
+ """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def shufflenet(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ShuffleNet(num_classes, loss, **kwargs) + if pretrained: + # init_pretrained_weights(model, model_urls['imagenet']) + import warnings + warnings.warn( + 'The imagenet pretrained weights need to be manually downloaded from {}' + .format(model_urls['imagenet']) + ) + return model diff --git a/feeder/trackers/strongsort/deep/models/shufflenetv2.py b/feeder/trackers/strongsort/deep/models/shufflenetv2.py new file mode 100644 index 0000000..3ff879e --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/shufflenetv2.py @@ -0,0 +1,262 @@ +""" +Code source: https://github.com/pytorch/vision +""" +from __future__ import division, absolute_import +import torch +import torch.utils.model_zoo as model_zoo +from torch import nn + +__all__ = [ + 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', + 'shufflenet_v2_x2_0' +] + +model_urls = { + 'shufflenetv2_x0.5': + 'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth', + 'shufflenetv2_x1.0': + 'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth', + 'shufflenetv2_x1.5': None, + 'shufflenetv2_x2.0': None, +} + + +def channel_shuffle(x, groups): + batchsize, num_channels, height, width = x.data.size() + channels_per_group = num_channels // groups + + # reshape + x = x.view(batchsize, groups, channels_per_group, height, width) + + x = torch.transpose(x, 1, 2).contiguous() + + # flatten + x = x.view(batchsize, -1, height, width) + + return x + + +class InvertedResidual(nn.Module): + + def __init__(self, inp, oup, stride): + super(InvertedResidual, self).__init__() + + if not (1 <= stride <= 3): + raise ValueError('illegal stride value') + self.stride = stride + + branch_features = oup // 2 + assert (self.stride != 1) or (inp == branch_features << 1) + + if self.stride > 1: + self.branch1 = nn.Sequential( + self.depthwise_conv( + inp, inp, kernel_size=3, stride=self.stride, padding=1 + ), + nn.BatchNorm2d(inp), + nn.Conv2d( + inp, + branch_features, + kernel_size=1, + stride=1, + padding=0, + bias=False + ), + nn.BatchNorm2d(branch_features), + nn.ReLU(inplace=True), + ) + + self.branch2 = nn.Sequential( + nn.Conv2d( + inp if (self.stride > 1) else branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + bias=False + ), + nn.BatchNorm2d(branch_features), + nn.ReLU(inplace=True), + self.depthwise_conv( + branch_features, + branch_features, + kernel_size=3, + stride=self.stride, + padding=1 + ), + nn.BatchNorm2d(branch_features), + nn.Conv2d( + branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + bias=False + ), + nn.BatchNorm2d(branch_features), + nn.ReLU(inplace=True), + ) + + @staticmethod + def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False): + return nn.Conv2d( + i, o, kernel_size, stride, padding, bias=bias, groups=i + ) + + def forward(self, x): + if self.stride == 1: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + else: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + +class ShuffleNetV2(nn.Module): + """ShuffleNetV2. + + Reference: + Ma et al. 
ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design. ECCV 2018. + + Public keys: + - ``shufflenet_v2_x0_5``: ShuffleNetV2 x0.5. + - ``shufflenet_v2_x1_0``: ShuffleNetV2 x1.0. + - ``shufflenet_v2_x1_5``: ShuffleNetV2 x1.5. + - ``shufflenet_v2_x2_0``: ShuffleNetV2 x2.0. + """ + + def __init__( + self, num_classes, loss, stages_repeats, stages_out_channels, **kwargs + ): + super(ShuffleNetV2, self).__init__() + self.loss = loss + + if len(stages_repeats) != 3: + raise ValueError( + 'expected stages_repeats as list of 3 positive ints' + ) + if len(stages_out_channels) != 5: + raise ValueError( + 'expected stages_out_channels as list of 5 positive ints' + ) + self._stage_out_channels = stages_out_channels + + input_channels = 3 + output_channels = self._stage_out_channels[0] + self.conv1 = nn.Sequential( + nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False), + nn.BatchNorm2d(output_channels), + nn.ReLU(inplace=True), + ) + input_channels = output_channels + + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + stage_names = ['stage{}'.format(i) for i in [2, 3, 4]] + for name, repeats, output_channels in zip( + stage_names, stages_repeats, self._stage_out_channels[1:] + ): + seq = [InvertedResidual(input_channels, output_channels, 2)] + for i in range(repeats - 1): + seq.append( + InvertedResidual(output_channels, output_channels, 1) + ) + setattr(self, name, nn.Sequential(*seq)) + input_channels = output_channels + + output_channels = self._stage_out_channels[-1] + self.conv5 = nn.Sequential( + nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False), + nn.BatchNorm2d(output_channels), + nn.ReLU(inplace=True), + ) + self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1)) + + self.classifier = nn.Linear(output_channels, num_classes) + + def featuremaps(self, x): + x = self.conv1(x) + x = self.maxpool(x) + x = self.stage2(x) + x = self.stage3(x) + x = self.stage4(x) + x = self.conv5(x) + return x + + def forward(self, x): + f = self.featuremaps(x) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError("Unsupported loss: {}".format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. 
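+
+    If ``model_url`` is ``None`` (as for the x1.5 and x2.0 variants above, which have
+    no released ImageNet weights), a warning is emitted and the model keeps its random
+    initialization. Illustrative sketch (751 is an arbitrary identity count)::
+
+        >>> model = shufflenet_v2_x1_5(num_classes=751, pretrained=True)
+        >>> # warns: "ImageNet pretrained weights are unavailable for this model"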
+ """ + if model_url is None: + import warnings + warnings.warn( + 'ImageNet pretrained weights are unavailable for this model' + ) + return + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def shufflenet_v2_x0_5(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ShuffleNetV2( + num_classes, loss, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['shufflenetv2_x0.5']) + return model + + +def shufflenet_v2_x1_0(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ShuffleNetV2( + num_classes, loss, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['shufflenetv2_x1.0']) + return model + + +def shufflenet_v2_x1_5(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ShuffleNetV2( + num_classes, loss, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['shufflenetv2_x1.5']) + return model + + +def shufflenet_v2_x2_0(num_classes, loss='softmax', pretrained=True, **kwargs): + model = ShuffleNetV2( + num_classes, loss, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['shufflenetv2_x2.0']) + return model diff --git a/feeder/trackers/strongsort/deep/models/squeezenet.py b/feeder/trackers/strongsort/deep/models/squeezenet.py new file mode 100644 index 0000000..83e8dc9 --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/squeezenet.py @@ -0,0 +1,236 @@ +""" +Code source: https://github.com/pytorch/vision +""" +from __future__ import division, absolute_import +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['squeezenet1_0', 'squeezenet1_1', 'squeezenet1_0_fc512'] + +model_urls = { + 'squeezenet1_0': + 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', + 'squeezenet1_1': + 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', +} + + +class Fire(nn.Module): + + def __init__( + self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes + ): + super(Fire, self).__init__() + self.inplanes = inplanes + self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) + self.squeeze_activation = nn.ReLU(inplace=True) + self.expand1x1 = nn.Conv2d( + squeeze_planes, expand1x1_planes, kernel_size=1 + ) + self.expand1x1_activation = nn.ReLU(inplace=True) + self.expand3x3 = nn.Conv2d( + squeeze_planes, expand3x3_planes, kernel_size=3, padding=1 + ) + self.expand3x3_activation = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.squeeze_activation(self.squeeze(x)) + return torch.cat( + [ + self.expand1x1_activation(self.expand1x1(x)), + self.expand3x3_activation(self.expand3x3(x)) + ], 1 + ) + + +class SqueezeNet(nn.Module): + """SqueezeNet. + + Reference: + Iandola et al. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters + and< 0.5 MB model size. arXiv:1602.07360. + + Public keys: + - ``squeezenet1_0``: SqueezeNet (version=1.0). + - ``squeezenet1_1``: SqueezeNet (version=1.1). + - ``squeezenet1_0_fc512``: SqueezeNet (version=1.0) + FC. 
+ """ + + def __init__( + self, + num_classes, + loss, + version=1.0, + fc_dims=None, + dropout_p=None, + **kwargs + ): + super(SqueezeNet, self).__init__() + self.loss = loss + self.feature_dim = 512 + + if version not in [1.0, 1.1]: + raise ValueError( + 'Unsupported SqueezeNet version {version}:' + '1.0 or 1.1 expected'.format(version=version) + ) + + if version == 1.0: + self.features = nn.Sequential( + nn.Conv2d(3, 96, kernel_size=7, stride=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(96, 16, 64, 64), + Fire(128, 16, 64, 64), + Fire(128, 32, 128, 128), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(256, 32, 128, 128), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(512, 64, 256, 256), + ) + else: + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=3, stride=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(64, 16, 64, 64), + Fire(128, 16, 64, 64), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(128, 32, 128, 128), + Fire(256, 32, 128, 128), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + Fire(512, 64, 256, 256), + ) + + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = self._construct_fc_layer(fc_dims, 512, dropout_p) + self.classifier = nn.Linear(self.feature_dim, num_classes) + + self._init_params() + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """Constructs fully connected layer + + Args: + fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed + input_dim (int): input dimension + dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + f = self.features(x) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if self.fc is not None: + v = self.fc(v) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initializes model with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. 
+ """ + pretrain_dict = model_zoo.load_url(model_url, map_location=None) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs): + model = SqueezeNet( + num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['squeezenet1_0']) + return model + + +def squeezenet1_0_fc512( + num_classes, loss='softmax', pretrained=True, **kwargs +): + model = SqueezeNet( + num_classes, + loss, + version=1.0, + fc_dims=[512], + dropout_p=None, + **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['squeezenet1_0']) + return model + + +def squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs): + model = SqueezeNet( + num_classes, loss, version=1.1, fc_dims=None, dropout_p=None, **kwargs + ) + if pretrained: + init_pretrained_weights(model, model_urls['squeezenet1_1']) + return model diff --git a/feeder/trackers/strongsort/deep/models/xception.py b/feeder/trackers/strongsort/deep/models/xception.py new file mode 100644 index 0000000..43db4ab --- /dev/null +++ b/feeder/trackers/strongsort/deep/models/xception.py @@ -0,0 +1,344 @@ +from __future__ import division, absolute_import +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo + +__all__ = ['xception'] + +pretrained_settings = { + 'xception': { + 'imagenet': { + 'url': + 'http://data.lip6.fr/cadene/pretrainedmodels/xception-43020ad28.pth', + 'input_space': 'RGB', + 'input_size': [3, 299, 299], + 'input_range': [0, 1], + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'num_classes': 1000, + 'scale': + 0.8975 # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } + } +} + + +class SeparableConv2d(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + bias=False + ): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d( + in_channels, + in_channels, + kernel_size, + stride, + padding, + dilation, + groups=in_channels, + bias=bias + ) + self.pointwise = nn.Conv2d( + in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias + ) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + + def __init__( + self, + in_filters, + out_filters, + reps, + strides=1, + start_with_relu=True, + grow_first=True + ): + super(Block, self).__init__() + + if out_filters != in_filters or strides != 1: + self.skip = nn.Conv2d( + in_filters, out_filters, 1, stride=strides, bias=False + ) + self.skipbn = nn.BatchNorm2d(out_filters) + else: + self.skip = None + + self.relu = nn.ReLU(inplace=True) + rep = [] + + filters = in_filters + if grow_first: + rep.append(self.relu) + rep.append( + SeparableConv2d( + in_filters, + out_filters, + 3, + stride=1, + padding=1, + bias=False + ) + ) + rep.append(nn.BatchNorm2d(out_filters)) + filters = out_filters + + for i in range(reps - 1): + rep.append(self.relu) + rep.append( + SeparableConv2d( + filters, filters, 3, stride=1, padding=1, bias=False + ) + ) + rep.append(nn.BatchNorm2d(filters)) + + if not grow_first: + rep.append(self.relu) + rep.append( + SeparableConv2d( + in_filters, + out_filters, + 3, + stride=1, + padding=1, + bias=False + 
) + ) + rep.append(nn.BatchNorm2d(out_filters)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + + +class Xception(nn.Module): + """Xception. + + Reference: + Chollet. Xception: Deep Learning with Depthwise + Separable Convolutions. CVPR 2017. + + Public keys: + - ``xception``: Xception. + """ + + def __init__( + self, num_classes, loss, fc_dims=None, dropout_p=None, **kwargs + ): + super(Xception, self).__init__() + self.loss = loss + + self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + + self.block1 = Block( + 64, 128, 2, 2, start_with_relu=False, grow_first=True + ) + self.block2 = Block( + 128, 256, 2, 2, start_with_relu=True, grow_first=True + ) + self.block3 = Block( + 256, 728, 2, 2, start_with_relu=True, grow_first=True + ) + + self.block4 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True + ) + self.block5 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True + ) + self.block6 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True + ) + self.block7 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True + ) + + self.block8 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True + ) + self.block9 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True + ) + self.block10 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True + ) + self.block11 = Block( + 728, 728, 3, 1, start_with_relu=True, grow_first=True + ) + + self.block12 = Block( + 728, 1024, 2, 2, start_with_relu=True, grow_first=False + ) + + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) + self.bn3 = nn.BatchNorm2d(1536) + + self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1) + self.bn4 = nn.BatchNorm2d(2048) + + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.feature_dim = 2048 + self.fc = self._construct_fc_layer(fc_dims, 2048, dropout_p) + self.classifier = nn.Linear(self.feature_dim, num_classes) + + self._init_params() + + def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): + """Constructs fully connected layer. 
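+
+        For example, ``fc_dims=[512]`` with ``input_dim=2048`` appends
+        ``Linear(2048, 512) -> BatchNorm1d(512) -> ReLU`` and sets ``self.feature_dim``
+        to 512, while ``fc_dims=None`` adds nothing and leaves the 2048-dim pooled
+        features untouched.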
+ + Args: + fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed + input_dim (int): input dimension + dropout_p (float): dropout probability, if None, dropout is unused + """ + if fc_dims is None: + self.feature_dim = input_dim + return None + + assert isinstance( + fc_dims, (list, tuple) + ), 'fc_dims must be either list or tuple, but got {}'.format( + type(fc_dims) + ) + + layers = [] + for dim in fc_dims: + layers.append(nn.Linear(input_dim, dim)) + layers.append(nn.BatchNorm1d(dim)) + layers.append(nn.ReLU(inplace=True)) + if dropout_p is not None: + layers.append(nn.Dropout(p=dropout_p)) + input_dim = dim + + self.feature_dim = fc_dims[-1] + + return nn.Sequential(*layers) + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu' + ) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def featuremaps(self, input): + x = self.conv1(input) + x = self.bn1(x) + x = F.relu(x, inplace=True) + + x = self.conv2(x) + x = self.bn2(x) + x = F.relu(x, inplace=True) + + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + + x = self.conv3(x) + x = self.bn3(x) + x = F.relu(x, inplace=True) + + x = self.conv4(x) + x = self.bn4(x) + x = F.relu(x, inplace=True) + return x + + def forward(self, x): + f = self.featuremaps(x) + v = self.global_avgpool(f) + v = v.view(v.size(0), -1) + + if self.fc is not None: + v = self.fc(v) + + if not self.training: + return v + + y = self.classifier(v) + + if self.loss == 'softmax': + return y + elif self.loss == 'triplet': + return y, v + else: + raise KeyError('Unsupported loss: {}'.format(self.loss)) + + +def init_pretrained_weights(model, model_url): + """Initialize models with pretrained weights. + + Layers that don't match with pretrained layers in name or size are kept unchanged. 
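+
+    Illustrative sketch of the ``xception`` factory below (``pretrained=True`` downloads
+    the ImageNet checkpoint listed in ``pretrained_settings``; 751 is an arbitrary
+    identity count)::
+
+        >>> import torch
+        >>> net = xception(num_classes=751, pretrained=True).eval()
+        >>> emb = net(torch.randn(4, 3, 256, 128))   # (4, 2048) embeddings; the 751-way
+        >>> # classifier stays randomly initialized because its shape differs from ImageNet's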
+ """ + pretrain_dict = model_zoo.load_url(model_url) + model_dict = model.state_dict() + pretrain_dict = { + k: v + for k, v in pretrain_dict.items() + if k in model_dict and model_dict[k].size() == v.size() + } + model_dict.update(pretrain_dict) + model.load_state_dict(model_dict) + + +def xception(num_classes, loss='softmax', pretrained=True, **kwargs): + model = Xception(num_classes, loss, fc_dims=None, dropout_p=None, **kwargs) + if pretrained: + model_url = pretrained_settings['xception']['imagenet']['url'] + init_pretrained_weights(model, model_url) + return model diff --git a/feeder/trackers/strongsort/deep/reid_model_factory.py b/feeder/trackers/strongsort/deep/reid_model_factory.py new file mode 100644 index 0000000..ed0542d --- /dev/null +++ b/feeder/trackers/strongsort/deep/reid_model_factory.py @@ -0,0 +1,215 @@ +import torch +from collections import OrderedDict + + + +__model_types = [ + 'resnet50', 'mlfn', 'hacnn', 'mobilenetv2_x1_0', 'mobilenetv2_x1_4', + 'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', + 'osnet_ibn_x1_0', 'osnet_ain_x1_0'] + +__trained_urls = { + + # market1501 models ######################################################## + 'resnet50_market1501.pt': + 'https://drive.google.com/uc?id=1dUUZ4rHDWohmsQXCRe2C_HbYkzz94iBV', + 'resnet50_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=17ymnLglnc64NRvGOitY3BqMRS9UWd1wg', + 'resnet50_msmt17.pt': + 'https://drive.google.com/uc?id=1ep7RypVDOthCRIAqDnn4_N-UhkkFHJsj', + + 'resnet50_fc512_market1501.pt': + 'https://drive.google.com/uc?id=1kv8l5laX_YCdIGVCetjlNdzKIA3NvsSt', + 'resnet50_fc512_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=13QN8Mp3XH81GK4BPGXobKHKyTGH50Rtx', + 'resnet50_fc512_msmt17.pt': + 'https://drive.google.com/uc?id=1fDJLcz4O5wxNSUvImIIjoaIF9u1Rwaud', + + 'mlfn_market1501.pt': + 'https://drive.google.com/uc?id=1wXcvhA_b1kpDfrt9s2Pma-MHxtj9pmvS', + 'mlfn_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=1rExgrTNb0VCIcOnXfMsbwSUW1h2L1Bum', + 'mlfn_msmt17.pt': + 'https://drive.google.com/uc?id=18JzsZlJb3Wm7irCbZbZ07TN4IFKvR6p-', + + 'hacnn_market1501.pt': + 'https://drive.google.com/uc?id=1LRKIQduThwGxMDQMiVkTScBwR7WidmYF', + 'hacnn_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=1zNm6tP4ozFUCUQ7Sv1Z98EAJWXJEhtYH', + 'hacnn_msmt17.pt': + 'https://drive.google.com/uc?id=1MsKRtPM5WJ3_Tk2xC0aGOO7pM3VaFDNZ', + + 'mobilenetv2_x1_0_market1501.pt': + 'https://drive.google.com/uc?id=18DgHC2ZJkjekVoqBWszD8_Xiikz-fewp', + 'mobilenetv2_x1_0_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=1q1WU2FETRJ3BXcpVtfJUuqq4z3psetds', + 'mobilenetv2_x1_0_msmt17.pt': + 'https://drive.google.com/uc?id=1j50Hv14NOUAg7ZeB3frzfX-WYLi7SrhZ', + + 'mobilenetv2_x1_4_market1501.pt': + 'https://drive.google.com/uc?id=1t6JCqphJG-fwwPVkRLmGGyEBhGOf2GO5', + 'mobilenetv2_x1_4_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=12uD5FeVqLg9-AFDju2L7SQxjmPb4zpBN', + 'mobilenetv2_x1_4_msmt17.pt': + 'https://drive.google.com/uc?id=1ZY5P2Zgm-3RbDpbXM0kIBMPvspeNIbXz', + + 'osnet_x1_0_market1501.pt': + 'https://drive.google.com/uc?id=1vduhq5DpN2q1g4fYEZfPI17MJeh9qyrA', + 'osnet_x1_0_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=1QZO_4sNf4hdOKKKzKc-TZU9WW1v6zQbq', + 'osnet_x1_0_msmt17.pt': + 'https://drive.google.com/uc?id=112EMUfBPYeYg70w-syK6V6Mx8-Qb9Q1M', + + 'osnet_x0_75_market1501.pt': + 'https://drive.google.com/uc?id=1ozRaDSQw_EQ8_93OUmjDbvLXw9TnfPer', + 'osnet_x0_75_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=1IE3KRaTPp4OUa6PGTFL_d5_KQSJbP0Or', + 'osnet_x0_75_msmt17.pt': + 
'https://drive.google.com/uc?id=1QEGO6WnJ-BmUzVPd3q9NoaO_GsPNlmWc', + + 'osnet_x0_5_market1501.pt': + 'https://drive.google.com/uc?id=1PLB9rgqrUM7blWrg4QlprCuPT7ILYGKT', + 'osnet_x0_5_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=1KoUVqmiST175hnkALg9XuTi1oYpqcyTu', + 'osnet_x0_5_msmt17.pt': + 'https://drive.google.com/uc?id=1UT3AxIaDvS2PdxzZmbkLmjtiqq7AIKCv', + + 'osnet_x0_25_market1501.pt': + 'https://drive.google.com/uc?id=1z1UghYvOTtjx7kEoRfmqSMu-z62J6MAj', + 'osnet_x0_25_dukemtmcreid.pt': + 'https://drive.google.com/uc?id=1eumrtiXT4NOspjyEV4j8cHmlOaaCGk5l', + 'osnet_x0_25_msmt17.pt': + 'https://drive.google.com/uc?id=1sSwXSUlj4_tHZequ_iZ8w_Jh0VaRQMqF', + + ####### market1501 models ################################################## + 'resnet50_msmt17.pt': + 'https://drive.google.com/uc?id=1yiBteqgIZoOeywE8AhGmEQl7FTVwrQmf', + 'osnet_x1_0_msmt17.pt': + 'https://drive.google.com/uc?id=1IosIFlLiulGIjwW3H8uMRmx3MzPwf86x', + 'osnet_x0_75_msmt17.pt': + 'https://drive.google.com/uc?id=1fhjSS_7SUGCioIf2SWXaRGPqIY9j7-uw', + + 'osnet_x0_5_msmt17.pt': + 'https://drive.google.com/uc?id=1DHgmb6XV4fwG3n-CnCM0zdL9nMsZ9_RF', + 'osnet_x0_25_msmt17.pt': + 'https://drive.google.com/uc?id=1Kkx2zW89jq_NETu4u42CFZTMVD5Hwm6e', + 'osnet_ibn_x1_0_msmt17.pt': + 'https://drive.google.com/uc?id=1q3Sj2ii34NlfxA4LvmHdWO_75NDRmECJ', + 'osnet_ain_x1_0_msmt17.pt': + 'https://drive.google.com/uc?id=1SigwBE6mPdqiJMqhuIY4aqC7--5CsMal', +} + + +def show_downloadeable_models(): + print('\nAvailable .pt ReID models for automatic download') + print(list(__trained_urls.keys())) + + +def get_model_url(model): + if model.name in __trained_urls: + return __trained_urls[model.name] + else: + None + + +def is_model_in_model_types(model): + if model.name in __model_types: + return True + else: + return False + + +def get_model_name(model): + for x in __model_types: + if x in model.name: + return x + return None + + +def download_url(url, dst): + """Downloads file from a url to a destination. + + Args: + url (str): url to download file. + dst (str): destination path. + """ + from six.moves import urllib + print('* url="{}"'.format(url)) + print('* destination="{}"'.format(dst)) + + def _reporthook(count, block_size, total_size): + global start_time + if count == 0: + start_time = time.time() + return + duration = time.time() - start_time + progress_size = int(count * block_size) + speed = int(progress_size / (1024*duration)) + percent = int(count * block_size * 100 / total_size) + sys.stdout.write( + '\r...%d%%, %d MB, %d KB/s, %d seconds passed' % + (percent, progress_size / (1024*1024), speed, duration) + ) + sys.stdout.flush() + + urllib.request.urlretrieve(url, dst, _reporthook) + sys.stdout.write('\n') + + +def load_pretrained_weights(model, weight_path): + r"""Loads pretrianed weights to model. + + Features:: + - Incompatible layers (unmatched in name or size) will be ignored. + - Can automatically deal with keys containing "module.". + + Args: + model (nn.Module): network model. + weight_path (str): path to pretrained weights. 
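+
+    Note: keys prefixed with ``module.`` (as saved from ``nn.DataParallel`` checkpoints)
+    are matched after stripping that prefix, and layers whose names or shapes do not
+    match ``model`` are reported as discarded instead of raising an error.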
+ + Examples:: + >>> from torchreid.utils import load_pretrained_weights + >>> weight_path = 'log/my_model/model-best.pth.tar' + >>> load_pretrained_weights(model, weight_path) + """ + checkpoint = torch.load(weight_path) + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + model_dict = model.state_dict() + new_state_dict = OrderedDict() + matched_layers, discarded_layers = [], [] + + for k, v in state_dict.items(): + if k.startswith('module.'): + k = k[7:] # discard module. + + if k in model_dict and model_dict[k].size() == v.size(): + new_state_dict[k] = v + matched_layers.append(k) + else: + discarded_layers.append(k) + + model_dict.update(new_state_dict) + model.load_state_dict(model_dict) + + if len(matched_layers) == 0: + warnings.warn( + 'The pretrained weights "{}" cannot be loaded, ' + 'please check the key names manually ' + '(** ignored and continue **)'.format(weight_path) + ) + else: + print( + 'Successfully loaded pretrained weights from "{}"'. + format(weight_path) + ) + if len(discarded_layers) > 0: + print( + '** The following layers are discarded ' + 'due to unmatched keys or layer size: {}'. + format(discarded_layers) + ) + diff --git a/feeder/trackers/strongsort/reid_multibackend.py b/feeder/trackers/strongsort/reid_multibackend.py new file mode 100644 index 0000000..58d2fbb --- /dev/null +++ b/feeder/trackers/strongsort/reid_multibackend.py @@ -0,0 +1,237 @@ +import torch.nn as nn +import torch +from pathlib import Path +import numpy as np +from itertools import islice +import torchvision.transforms as transforms +import cv2 +import sys +import torchvision.transforms as T +from collections import OrderedDict, namedtuple +import gdown +from os.path import exists as file_exists + + +from ultralytics.yolo.utils.checks import check_requirements, check_version +from ultralytics.yolo.utils import LOGGER +from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name, + download_url, load_pretrained_weights) +from trackers.strongsort.deep.models import build_model + + +def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +class ReIDDetectMultiBackend(nn.Module): + # ReID models MultiBackend class for python inference on various backends + def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False): + super().__init__() + + w = weights[0] if isinstance(weights, list) else weights + self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w) # get backend + self.fp16 = fp16 + self.fp16 &= self.pt or self.jit or self.engine # FP16 + + # Build transform functions + self.device = device + self.image_size=(256, 128) + self.pixel_mean=[0.485, 0.456, 0.406] + self.pixel_std=[0.229, 0.224, 0.225] + self.transforms = [] + self.transforms += [T.Resize(self.image_size)] + self.transforms += [T.ToTensor()] + self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)] + self.preprocess = T.Compose(self.transforms) + self.to_pil = T.ToPILImage() + + model_name = get_model_name(w) + + if w.suffix == '.pt': + model_url = get_model_url(w) + if not file_exists(w) and model_url is not None: + 
gdown.download(model_url, str(w), quiet=False) + elif file_exists(w): + pass + else: + print(f'No URL associated to the chosen StrongSORT weights ({w}). Choose between:') + show_downloadeable_models() + exit() + + # Build model + self.model = build_model( + model_name, + num_classes=1, + pretrained=not (w and w.is_file()), + use_gpu=device + ) + + if self.pt: # PyTorch + # populate model arch with weights + if w and w.is_file() and w.suffix == '.pt': + load_pretrained_weights(self.model, w) + + self.model.to(device).eval() + self.model.half() if self.fp16 else self.model.float() + elif self.jit: + LOGGER.info(f'Loading {w} for TorchScript inference...') + self.model = torch.jit.load(w) + self.model.half() if self.fp16 else self.model.float() + elif self.onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + cuda = torch.cuda.is_available() and device.type != 'cpu' + #check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + import onnxruntime + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + self.session = onnxruntime.InferenceSession(str(w), providers=providers) + elif self.engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + if device.type == 'cpu': + device = torch.device('cuda:0') + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + self.model_ = runtime.deserialize_cuda_engine(f.read()) + self.context = self.model_.create_execution_context() + self.bindings = OrderedDict() + self.fp16 = False # default updated below + dynamic = False + for index in range(self.model_.num_bindings): + name = self.model_.get_binding_name(index) + dtype = trt.nptype(self.model_.get_binding_dtype(index)) + if self.model_.binding_is_input(index): + if -1 in tuple(self.model_.get_binding_shape(index)): # dynamic + dynamic = True + self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2])) + if dtype == np.float16: + self.fp16 = True + shape = tuple(self.context.get_binding_shape(index)) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) + self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) + batch_size = self.bindings['images'].shape[0] # if dynamic, this is instead max batch size + elif self.xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + from openvino.runtime import Core, Layout, get_batch + ie = Core() + if not Path(w).is_file(): # if not *.xml + w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir + network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) + if network.get_parameters()[0].get_layout().empty: + network.get_parameters()[0].set_layout(Layout("NCWH")) + batch_dim = get_batch(network) + if batch_dim.is_static: + batch_size = batch_dim.get_length() + self.executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 + self.output_layer = next(iter(self.executable_network.outputs)) + + elif self.tflite: + LOGGER.info(f'Loading {w} for TensorFlow Lite 
inference...') + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + self.interpreter = tf.lite.Interpreter(model_path=w) + self.interpreter.allocate_tensors() + # Get input and output tensors. + self.input_details = self.interpreter.get_input_details() + self.output_details = self.interpreter.get_output_details() + + # Test model on random input data. + input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32) + self.interpreter.set_tensor(self.input_details[0]['index'], input_data) + + self.interpreter.invoke() + + # The function `get_tensor()` returns a copy of the tensor data. + output_data = self.interpreter.get_tensor(self.output_details[0]['index']) + else: + print('This model framework is not supported yet!') + exit() + + + @staticmethod + def model_type(p='path/to/model.pt'): + # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx + from trackers.reid_export import export_formats + sf = list(export_formats().Suffix) # export suffixes + check_suffix(p, sf) # checks + types = [s in Path(p).name for s in sf] + return types + + def _preprocess(self, im_batch): + + images = [] + for element in im_batch: + image = self.to_pil(element) + image = self.preprocess(image) + images.append(image) + + images = torch.stack(images, dim=0) + images = images.to(self.device) + + return images + + + def forward(self, im_batch): + + # preprocess batch + im_batch = self._preprocess(im_batch) + + # batch to half + if self.fp16 and im_batch.dtype != torch.float16: + im_batch = im_batch.half() + + # batch processing + features = [] + if self.pt: + features = self.model(im_batch) + elif self.jit: # TorchScript + features = self.model(im_batch) + elif self.onnx: # ONNX Runtime + im_batch = im_batch.cpu().numpy() # torch to numpy + features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0] + elif self.engine: # TensorRT + if True and im_batch.shape != self.bindings['images'].shape: + i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output')) + self.context.set_binding_shape(i_in, im_batch.shape) # reshape if dynamic + self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape) + self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) + s = self.bindings['images'].shape + assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" + self.binding_addrs['images'] = int(im_batch.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + features = self.bindings['output'].data + elif self.xml: # OpenVINO + im_batch = im_batch.cpu().numpy() # FP32 + features = self.executable_network([im_batch])[self.output_layer] + else: + print('Framework not supported at the moment, we are working on it...') + exit() + + if isinstance(features, (list, tuple)): + return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features] + else: + return self.from_numpy(features) + + def from_numpy(self, x): + return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x + + def warmup(self, imgsz=[(256, 128, 3)]): + # Warmup model by running inference once + warmup_types = self.pt, self.jit, 
self.onnx, self.engine, self.tflite + if any(warmup_types) and self.device.type != 'cpu': + im = [np.empty(*imgsz).astype(np.uint8)] # input + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup \ No newline at end of file diff --git a/feeder/trackers/strongsort/results/output_04.gif b/feeder/trackers/strongsort/results/output_04.gif new file mode 100644 index 0000000..d379b76 Binary files /dev/null and b/feeder/trackers/strongsort/results/output_04.gif differ diff --git a/feeder/trackers/strongsort/results/output_th025.gif b/feeder/trackers/strongsort/results/output_th025.gif new file mode 100644 index 0000000..fb5e7c1 Binary files /dev/null and b/feeder/trackers/strongsort/results/output_th025.gif differ diff --git a/feeder/trackers/strongsort/results/track_all_1280_025conf.gif b/feeder/trackers/strongsort/results/track_all_1280_025conf.gif new file mode 100644 index 0000000..f0b5718 Binary files /dev/null and b/feeder/trackers/strongsort/results/track_all_1280_025conf.gif differ diff --git a/feeder/trackers/strongsort/results/track_all_seg_1280_025conf.gif b/feeder/trackers/strongsort/results/track_all_seg_1280_025conf.gif new file mode 100644 index 0000000..d58476d Binary files /dev/null and b/feeder/trackers/strongsort/results/track_all_seg_1280_025conf.gif differ diff --git a/feeder/trackers/strongsort/results/track_pedestrians_1280_05conf.gif b/feeder/trackers/strongsort/results/track_pedestrians_1280_05conf.gif new file mode 100644 index 0000000..81ea91b Binary files /dev/null and b/feeder/trackers/strongsort/results/track_pedestrians_1280_05conf.gif differ diff --git a/feeder/trackers/strongsort/sort/__init__.py b/feeder/trackers/strongsort/sort/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/feeder/trackers/strongsort/sort/detection.py b/feeder/trackers/strongsort/sort/detection.py new file mode 100644 index 0000000..1fb05f8 --- /dev/null +++ b/feeder/trackers/strongsort/sort/detection.py @@ -0,0 +1,58 @@ +# vim: expandtab:ts=4:sw=4 +import numpy as np + + +class Detection(object): + """ + This class represents a bounding box detection in a single image. + + Parameters + ---------- + tlwh : array_like + Bounding box in format `(x, y, w, h)`. + confidence : float + Detector confidence score. + feature : array_like + A feature vector that describes the object contained in this image. + + Attributes + ---------- + tlwh : ndarray + Bounding box in format `(top left x, top left y, width, height)`. + confidence : ndarray + Detector confidence score. + feature : ndarray | NoneType + A feature vector that describes the object contained in this image. + + """ + + def __init__(self, tlwh, confidence, feature): + self.tlwh = np.asarray(tlwh, dtype=np.float32) + self.confidence = float(confidence) + self.feature = np.asarray(feature.cpu(), dtype=np.float32) + + def to_tlbr(self): + """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., + `(top left, bottom right)`. + """ + ret = self.tlwh.copy() + ret[2:] += ret[:2] + return ret + + def to_xyah(self): + """Convert bounding box to format `(center x, center y, aspect ratio, + height)`, where the aspect ratio is `width / height`. + """ + ret = self.tlwh.copy() + ret[:2] += ret[2:] / 2 + ret[2] /= ret[3] + return ret + +def to_xyah_ext(bbox): + """Convert bounding box to format `(center x, center y, aspect ratio, + height)`, where the aspect ratio is `width / height`. 
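+
+    Illustrative example::
+
+        >>> to_xyah_ext(np.array([100., 50., 40., 80.]))
+        >>> # -> [120., 90., 0.5, 80.]  (center x, center y, w/h aspect ratio, height)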
+ """ + ret = bbox.copy() + ret[:2] += ret[2:] / 2 + ret[2] /= ret[3] + return ret diff --git a/feeder/trackers/strongsort/sort/iou_matching.py b/feeder/trackers/strongsort/sort/iou_matching.py new file mode 100644 index 0000000..62d5a3f --- /dev/null +++ b/feeder/trackers/strongsort/sort/iou_matching.py @@ -0,0 +1,82 @@ +# vim: expandtab:ts=4:sw=4 +from __future__ import absolute_import +import numpy as np +from . import linear_assignment + + +def iou(bbox, candidates): + """Computer intersection over union. + + Parameters + ---------- + bbox : ndarray + A bounding box in format `(top left x, top left y, width, height)`. + candidates : ndarray + A matrix of candidate bounding boxes (one per row) in the same format + as `bbox`. + + Returns + ------- + ndarray + The intersection over union in [0, 1] between the `bbox` and each + candidate. A higher score means a larger fraction of the `bbox` is + occluded by the candidate. + + """ + bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:] + candidates_tl = candidates[:, :2] + candidates_br = candidates[:, :2] + candidates[:, 2:] + + tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis], + np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]] + br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis], + np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]] + wh = np.maximum(0., br - tl) + + area_intersection = wh.prod(axis=1) + area_bbox = bbox[2:].prod() + area_candidates = candidates[:, 2:].prod(axis=1) + return area_intersection / (area_bbox + area_candidates - area_intersection) + + +def iou_cost(tracks, detections, track_indices=None, + detection_indices=None): + """An intersection over union distance metric. + + Parameters + ---------- + tracks : List[deep_sort.track.Track] + A list of tracks. + detections : List[deep_sort.detection.Detection] + A list of detections. + track_indices : Optional[List[int]] + A list of indices to tracks that should be matched. Defaults to + all `tracks`. + detection_indices : Optional[List[int]] + A list of indices to detections that should be matched. Defaults + to all `detections`. + + Returns + ------- + ndarray + Returns a cost matrix of shape + len(track_indices), len(detection_indices) where entry (i, j) is + `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`. + + """ + if track_indices is None: + track_indices = np.arange(len(tracks)) + if detection_indices is None: + detection_indices = np.arange(len(detections)) + + cost_matrix = np.zeros((len(track_indices), len(detection_indices))) + for row, track_idx in enumerate(track_indices): + if tracks[track_idx].time_since_update > 1: + cost_matrix[row, :] = linear_assignment.INFTY_COST + continue + + bbox = tracks[track_idx].to_tlwh() + candidates = np.asarray( + [detections[i].tlwh for i in detection_indices]) + cost_matrix[row, :] = 1. - iou(bbox, candidates) + return cost_matrix diff --git a/feeder/trackers/strongsort/sort/kalman_filter.py b/feeder/trackers/strongsort/sort/kalman_filter.py new file mode 100644 index 0000000..87c48d7 --- /dev/null +++ b/feeder/trackers/strongsort/sort/kalman_filter.py @@ -0,0 +1,214 @@ +# vim: expandtab:ts=4:sw=4 +import numpy as np +import scipy.linalg +""" +Table for the 0.95 quantile of the chi-square distribution with N degrees of +freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv +function and used as Mahalanobis gating threshold. 
+""" +chi2inv95 = { + 1: 3.8415, + 2: 5.9915, + 3: 7.8147, + 4: 9.4877, + 5: 11.070, + 6: 12.592, + 7: 14.067, + 8: 15.507, + 9: 16.919} + + +class KalmanFilter(object): + """ + A simple Kalman filter for tracking bounding boxes in image space. + The 8-dimensional state space + x, y, a, h, vx, vy, va, vh + contains the bounding box center position (x, y), aspect ratio a, height h, + and their respective velocities. + Object motion follows a constant velocity model. The bounding box location + (x, y, a, h) is taken as direct observation of the state space (linear + observation model). + """ + + def __init__(self): + ndim, dt = 4, 1. + + # Create Kalman filter model matrices. + self._motion_mat = np.eye(2 * ndim, 2 * ndim) + for i in range(ndim): + self._motion_mat[i, ndim + i] = dt + + self._update_mat = np.eye(ndim, 2 * ndim) + + # Motion and observation uncertainty are chosen relative to the current + # state estimate. These weights control the amount of uncertainty in + # the model. This is a bit hacky. + self._std_weight_position = 1. / 20 + self._std_weight_velocity = 1. / 160 + + def initiate(self, measurement): + """Create track from unassociated measurement. + Parameters + ---------- + measurement : ndarray + Bounding box coordinates (x, y, a, h) with center position (x, y), + aspect ratio a, and height h. + Returns + ------- + (ndarray, ndarray) + Returns the mean vector (8 dimensional) and covariance matrix (8x8 + dimensional) of the new track. Unobserved velocities are initialized + to 0 mean. + """ + mean_pos = measurement + mean_vel = np.zeros_like(mean_pos) + mean = np.r_[mean_pos, mean_vel] + + std = [ + 2 * self._std_weight_position * measurement[0], # the center point x + 2 * self._std_weight_position * measurement[1], # the center point y + 1 * measurement[2], # the ratio of width/height + 2 * self._std_weight_position * measurement[3], # the height + 10 * self._std_weight_velocity * measurement[0], + 10 * self._std_weight_velocity * measurement[1], + 0.1 * measurement[2], + 10 * self._std_weight_velocity * measurement[3]] + covariance = np.diag(np.square(std)) + return mean, covariance + + def predict(self, mean, covariance): + """Run Kalman filter prediction step. + Parameters + ---------- + mean : ndarray + The 8 dimensional mean vector of the object state at the previous + time step. + covariance : ndarray + The 8x8 dimensional covariance matrix of the object state at the + previous time step. + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + """ + std_pos = [ + self._std_weight_position * mean[0], + self._std_weight_position * mean[1], + 1 * mean[2], + self._std_weight_position * mean[3]] + std_vel = [ + self._std_weight_velocity * mean[0], + self._std_weight_velocity * mean[1], + 0.1 * mean[2], + self._std_weight_velocity * mean[3]] + motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) + + mean = np.dot(self._motion_mat, mean) + covariance = np.linalg.multi_dot(( + self._motion_mat, covariance, self._motion_mat.T)) + motion_cov + + return mean, covariance + + def project(self, mean, covariance, confidence=.0): + """Project state distribution to measurement space. + Parameters + ---------- + mean : ndarray + The state's mean vector (8 dimensional array). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). 
+ confidence: (dyh) 检测框置信度 + Returns + ------- + (ndarray, ndarray) + Returns the projected mean and covariance matrix of the given state + estimate. + """ + std = [ + self._std_weight_position * mean[3], + self._std_weight_position * mean[3], + 1e-1, + self._std_weight_position * mean[3]] + + + std = [(1 - confidence) * x for x in std] + + innovation_cov = np.diag(np.square(std)) + + mean = np.dot(self._update_mat, mean) + covariance = np.linalg.multi_dot(( + self._update_mat, covariance, self._update_mat.T)) + return mean, covariance + innovation_cov + + def update(self, mean, covariance, measurement, confidence=.0): + """Run Kalman filter correction step. + Parameters + ---------- + mean : ndarray + The predicted state's mean vector (8 dimensional). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + measurement : ndarray + The 4 dimensional measurement vector (x, y, a, h), where (x, y) + is the center position, a the aspect ratio, and h the height of the + bounding box. + confidence: (dyh)检测框置信度 + Returns + ------- + (ndarray, ndarray) + Returns the measurement-corrected state distribution. + """ + projected_mean, projected_cov = self.project(mean, covariance, confidence) + + chol_factor, lower = scipy.linalg.cho_factor( + projected_cov, lower=True, check_finite=False) + kalman_gain = scipy.linalg.cho_solve( + (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, + check_finite=False).T + innovation = measurement - projected_mean + + new_mean = mean + np.dot(innovation, kalman_gain.T) + new_covariance = covariance - np.linalg.multi_dot(( + kalman_gain, projected_cov, kalman_gain.T)) + return new_mean, new_covariance + + def gating_distance(self, mean, covariance, measurements, + only_position=False): + """Compute gating distance between state distribution and measurements. + A suitable distance threshold can be obtained from `chi2inv95`. If + `only_position` is False, the chi-square distribution has 4 degrees of + freedom, otherwise 2. + Parameters + ---------- + mean : ndarray + Mean vector over the state distribution (8 dimensional). + covariance : ndarray + Covariance of the state distribution (8x8 dimensional). + measurements : ndarray + An Nx4 dimensional matrix of N measurements, each in + format (x, y, a, h) where (x, y) is the bounding box center + position, a the aspect ratio, and h the height. + only_position : Optional[bool] + If True, distance computation is done with respect to the bounding + box center position only. + Returns + ------- + ndarray + Returns an array of length N, where the i-th element contains the + squared Mahalanobis distance between (mean, covariance) and + `measurements[i]`. + """ + mean, covariance = self.project(mean, covariance) + + if only_position: + mean, covariance = mean[:2], covariance[:2, :2] + measurements = measurements[:, :2] + + cholesky_factor = np.linalg.cholesky(covariance) + d = measurements - mean + z = scipy.linalg.solve_triangular( + cholesky_factor, d.T, lower=True, check_finite=False, + overwrite_b=True) + squared_maha = np.sum(z * z, axis=0) + return squared_maha \ No newline at end of file diff --git a/feeder/trackers/strongsort/sort/linear_assignment.py b/feeder/trackers/strongsort/sort/linear_assignment.py new file mode 100644 index 0000000..9ab92c5 --- /dev/null +++ b/feeder/trackers/strongsort/sort/linear_assignment.py @@ -0,0 +1,174 @@ +# vim: expandtab:ts=4:sw=4 +from __future__ import absolute_import +import numpy as np +from scipy.optimize import linear_sum_assignment +from . 
import kalman_filter + + +INFTY_COST = 1e+5 + + +def min_cost_matching( + distance_metric, max_distance, tracks, detections, track_indices=None, + detection_indices=None): + """Solve linear assignment problem. + Parameters + ---------- + distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray + The distance metric is given a list of tracks and detections as well as + a list of N track indices and M detection indices. The metric should + return the NxM dimensional cost matrix, where element (i, j) is the + association cost between the i-th track in the given track indices and + the j-th detection in the given detection_indices. + max_distance : float + Gating threshold. Associations with cost larger than this value are + disregarded. + tracks : List[track.Track] + A list of predicted tracks at the current time step. + detections : List[detection.Detection] + A list of detections at the current time step. + track_indices : List[int] + List of track indices that maps rows in `cost_matrix` to tracks in + `tracks` (see description above). + detection_indices : List[int] + List of detection indices that maps columns in `cost_matrix` to + detections in `detections` (see description above). + Returns + ------- + (List[(int, int)], List[int], List[int]) + Returns a tuple with the following three entries: + * A list of matched track and detection indices. + * A list of unmatched track indices. + * A list of unmatched detection indices. + """ + if track_indices is None: + track_indices = np.arange(len(tracks)) + if detection_indices is None: + detection_indices = np.arange(len(detections)) + + if len(detection_indices) == 0 or len(track_indices) == 0: + return [], track_indices, detection_indices # Nothing to match. + + cost_matrix = distance_metric( + tracks, detections, track_indices, detection_indices) + cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5 + row_indices, col_indices = linear_sum_assignment(cost_matrix) + + matches, unmatched_tracks, unmatched_detections = [], [], [] + for col, detection_idx in enumerate(detection_indices): + if col not in col_indices: + unmatched_detections.append(detection_idx) + for row, track_idx in enumerate(track_indices): + if row not in row_indices: + unmatched_tracks.append(track_idx) + for row, col in zip(row_indices, col_indices): + track_idx = track_indices[row] + detection_idx = detection_indices[col] + if cost_matrix[row, col] > max_distance: + unmatched_tracks.append(track_idx) + unmatched_detections.append(detection_idx) + else: + matches.append((track_idx, detection_idx)) + return matches, unmatched_tracks, unmatched_detections + + +def matching_cascade( + distance_metric, max_distance, cascade_depth, tracks, detections, + track_indices=None, detection_indices=None): + """Run matching cascade. + Parameters + ---------- + distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray + The distance metric is given a list of tracks and detections as well as + a list of N track indices and M detection indices. The metric should + return the NxM dimensional cost matrix, where element (i, j) is the + association cost between the i-th track in the given track indices and + the j-th detection in the given detection indices. + max_distance : float + Gating threshold. Associations with cost larger than this value are + disregarded. + cascade_depth: int + The cascade depth, should be se to the maximum track age. 
+ tracks : List[track.Track] + A list of predicted tracks at the current time step. + detections : List[detection.Detection] + A list of detections at the current time step. + track_indices : Optional[List[int]] + List of track indices that maps rows in `cost_matrix` to tracks in + `tracks` (see description above). Defaults to all tracks. + detection_indices : Optional[List[int]] + List of detection indices that maps columns in `cost_matrix` to + detections in `detections` (see description above). Defaults to all + detections. + Returns + ------- + (List[(int, int)], List[int], List[int]) + Returns a tuple with the following three entries: + * A list of matched track and detection indices. + * A list of unmatched track indices. + * A list of unmatched detection indices. + """ + if track_indices is None: + track_indices = list(range(len(tracks))) + if detection_indices is None: + detection_indices = list(range(len(detections))) + + unmatched_detections = detection_indices + matches = [] + track_indices_l = [ + k for k in track_indices + # if tracks[k].time_since_update == 1 + level + ] + matches_l, _, unmatched_detections = \ + min_cost_matching( + distance_metric, max_distance, tracks, detections, + track_indices_l, unmatched_detections) + matches += matches_l + unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches)) + return matches, unmatched_tracks, unmatched_detections + + +def gate_cost_matrix( + cost_matrix, tracks, detections, track_indices, detection_indices, mc_lambda, + gated_cost=INFTY_COST, only_position=False): + """Invalidate infeasible entries in cost matrix based on the state + distributions obtained by Kalman filtering. + Parameters + ---------- + kf : The Kalman filter. + cost_matrix : ndarray + The NxM dimensional cost matrix, where N is the number of track indices + and M is the number of detection indices, such that entry (i, j) is the + association cost between `tracks[track_indices[i]]` and + `detections[detection_indices[j]]`. + tracks : List[track.Track] + A list of predicted tracks at the current time step. + detections : List[detection.Detection] + A list of detections at the current time step. + track_indices : List[int] + List of track indices that maps rows in `cost_matrix` to tracks in + `tracks` (see description above). + detection_indices : List[int] + List of detection indices that maps columns in `cost_matrix` to + detections in `detections` (see description above). + gated_cost : Optional[float] + Entries in the cost matrix corresponding to infeasible associations are + set this value. Defaults to a very large value. + only_position : Optional[bool] + If True, only the x, y position of the state distribution is considered + during gating. Defaults to False. + Returns + ------- + ndarray + Returns the modified cost matrix. 
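+ # NOTE (illustrative sketch, not part of the original file): the gating step
+ # forces implausible track/detection pairs out of the assignment. With the full
+ # (x, y, a, h) measurement, gating_dim is 4 and chi2inv95[4] = 9.4877, so any
+ # squared Mahalanobis distance above that threshold is overwritten with
+ # gated_cost before the costs are blended with mc_lambda:
+ #
+ #     import numpy as np
+ #     appearance = np.array([[0.12, 0.40], [0.35, 0.08]])   # cosine costs
+ #     maha = np.array([[3.1, 25.0], [40.0, 2.2]])           # squared Mahalanobis
+ #     appearance[maha > 9.4877] = 1e5                       # gate infeasible pairs
+ #     blended = 0.995 * appearance + (1 - 0.995) * maha     # mc_lambda = 0.995
+ #
+ # The blended matrix is what min_cost_matching ultimately solves over.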
+ """ + gating_dim = 2 if only_position else 4 + gating_threshold = kalman_filter.chi2inv95[gating_dim] + measurements = np.asarray( + [detections[i].to_xyah() for i in detection_indices]) + for row, track_idx in enumerate(track_indices): + track = tracks[track_idx] + gating_distance = track.kf.gating_distance(track.mean, track.covariance, measurements, only_position) + cost_matrix[row, gating_distance > gating_threshold] = gated_cost + cost_matrix[row] = mc_lambda * cost_matrix[row] + (1 - mc_lambda) * gating_distance + return cost_matrix diff --git a/feeder/trackers/strongsort/sort/nn_matching.py b/feeder/trackers/strongsort/sort/nn_matching.py new file mode 100644 index 0000000..154f854 --- /dev/null +++ b/feeder/trackers/strongsort/sort/nn_matching.py @@ -0,0 +1,162 @@ +# vim: expandtab:ts=4:sw=4 +import numpy as np +import sys +import torch + + +def _pdist(a, b): + """Compute pair-wise squared distance between points in `a` and `b`. + Parameters + ---------- + a : array_like + An NxM matrix of N samples of dimensionality M. + b : array_like + An LxM matrix of L samples of dimensionality M. + Returns + ------- + ndarray + Returns a matrix of size len(a), len(b) such that eleement (i, j) + contains the squared distance between `a[i]` and `b[j]`. + """ + a, b = np.asarray(a), np.asarray(b) + if len(a) == 0 or len(b) == 0: + return np.zeros((len(a), len(b))) + a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1) + r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :] + r2 = np.clip(r2, 0., float(np.inf)) + return r2 + + +def _cosine_distance(a, b, data_is_normalized=False): + """Compute pair-wise cosine distance between points in `a` and `b`. + Parameters + ---------- + a : array_like + An NxM matrix of N samples of dimensionality M. + b : array_like + An LxM matrix of L samples of dimensionality M. + data_is_normalized : Optional[bool] + If True, assumes rows in a and b are unit length vectors. + Otherwise, a and b are explicitly normalized to lenght 1. + Returns + ------- + ndarray + Returns a matrix of size len(a), len(b) such that eleement (i, j) + contains the squared distance between `a[i]` and `b[j]`. + """ + if not data_is_normalized: + a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True) + b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True) + return 1. - np.dot(a, b.T) + + +def _nn_euclidean_distance(x, y): + """ Helper function for nearest neighbor distance metric (Euclidean). + Parameters + ---------- + x : ndarray + A matrix of N row-vectors (sample points). + y : ndarray + A matrix of M row-vectors (query points). + Returns + ------- + ndarray + A vector of length M that contains for each entry in `y` the + smallest Euclidean distance to a sample in `x`. + """ + # x_ = torch.from_numpy(np.asarray(x) / np.linalg.norm(x, axis=1, keepdims=True)) + # y_ = torch.from_numpy(np.asarray(y) / np.linalg.norm(y, axis=1, keepdims=True)) + distances = distances = _pdist(x, y) + return np.maximum(0.0, torch.min(distances, axis=0)[0].numpy()) + + +def _nn_cosine_distance(x, y): + """ Helper function for nearest neighbor distance metric (cosine). + Parameters + ---------- + x : ndarray + A matrix of N row-vectors (sample points). + y : ndarray + A matrix of M row-vectors (query points). + Returns + ------- + ndarray + A vector of length M that contains for each entry in `y` the + smallest cosine distance to a sample in `x`. 
+ """ + x_ = torch.from_numpy(np.asarray(x)) + y_ = torch.from_numpy(np.asarray(y)) + distances = _cosine_distance(x_, y_) + distances = distances + return distances.min(axis=0) + + +class NearestNeighborDistanceMetric(object): + """ + A nearest neighbor distance metric that, for each target, returns + the closest distance to any sample that has been observed so far. + Parameters + ---------- + metric : str + Either "euclidean" or "cosine". + matching_threshold: float + The matching threshold. Samples with larger distance are considered an + invalid match. + budget : Optional[int] + If not None, fix samples per class to at most this number. Removes + the oldest samples when the budget is reached. + Attributes + ---------- + samples : Dict[int -> List[ndarray]] + A dictionary that maps from target identities to the list of samples + that have been observed so far. + """ + + def __init__(self, metric, matching_threshold, budget=None): + if metric == "euclidean": + self._metric = _nn_euclidean_distance + elif metric == "cosine": + self._metric = _nn_cosine_distance + else: + raise ValueError( + "Invalid metric; must be either 'euclidean' or 'cosine'") + self.matching_threshold = matching_threshold + self.budget = budget + self.samples = {} + + def partial_fit(self, features, targets, active_targets): + """Update the distance metric with new data. + Parameters + ---------- + features : ndarray + An NxM matrix of N features of dimensionality M. + targets : ndarray + An integer array of associated target identities. + active_targets : List[int] + A list of targets that are currently present in the scene. + """ + for feature, target in zip(features, targets): + self.samples.setdefault(target, []).append(feature) + if self.budget is not None: + self.samples[target] = self.samples[target][-self.budget:] + self.samples = {k: self.samples[k] for k in active_targets} + + def distance(self, features, targets): + """Compute distance between features and targets. + Parameters + ---------- + features : ndarray + An NxM matrix of N features of dimensionality M. + targets : List[int] + A list of targets to match the given `features` against. + Returns + ------- + ndarray + Returns a cost matrix of shape len(targets), len(features), where + element (i, j) contains the closest squared distance between + `targets[i]` and `features[j]`. + """ + cost_matrix = np.zeros((len(targets), len(features))) + for i, target in enumerate(targets): + cost_matrix[i, :] = self._metric(self.samples[target], features) + return cost_matrix \ No newline at end of file diff --git a/feeder/trackers/strongsort/sort/preprocessing.py b/feeder/trackers/strongsort/sort/preprocessing.py new file mode 100644 index 0000000..5493b12 --- /dev/null +++ b/feeder/trackers/strongsort/sort/preprocessing.py @@ -0,0 +1,73 @@ +# vim: expandtab:ts=4:sw=4 +import numpy as np +import cv2 + + +def non_max_suppression(boxes, max_bbox_overlap, scores=None): + """Suppress overlapping detections. + + Original code from [1]_ has been adapted to include confidence score. + + .. [1] http://www.pyimagesearch.com/2015/02/16/ + faster-non-maximum-suppression-python/ + + Examples + -------- + + >>> boxes = [d.roi for d in detections] + >>> scores = [d.confidence for d in detections] + >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores) + >>> detections = [detections[i] for i in indices] + + Parameters + ---------- + boxes : ndarray + Array of ROIs (x, y, width, height). 
+ max_bbox_overlap : float + ROIs that overlap more than this value are suppressed. + scores : Optional[array_like] + Detector confidence score. + + Returns + ------- + List[int] + Returns indices of detections that have survived non-maxima suppression. + + """ + if len(boxes) == 0: + return [] + + boxes = boxes.astype(float) + pick = [] + + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + boxes[:, 0] + y2 = boxes[:, 3] + boxes[:, 1] + + area = (x2 - x1 + 1) * (y2 - y1 + 1) + if scores is not None: + idxs = np.argsort(scores) + else: + idxs = np.argsort(y2) + + while len(idxs) > 0: + last = len(idxs) - 1 + i = idxs[last] + pick.append(i) + + xx1 = np.maximum(x1[i], x1[idxs[:last]]) + yy1 = np.maximum(y1[i], y1[idxs[:last]]) + xx2 = np.minimum(x2[i], x2[idxs[:last]]) + yy2 = np.minimum(y2[i], y2[idxs[:last]]) + + w = np.maximum(0, xx2 - xx1 + 1) + h = np.maximum(0, yy2 - yy1 + 1) + + overlap = (w * h) / area[idxs[:last]] + + idxs = np.delete( + idxs, np.concatenate( + ([last], np.where(overlap > max_bbox_overlap)[0]))) + + return pick diff --git a/feeder/trackers/strongsort/sort/track.py b/feeder/trackers/strongsort/sort/track.py new file mode 100644 index 0000000..bb6773f --- /dev/null +++ b/feeder/trackers/strongsort/sort/track.py @@ -0,0 +1,317 @@ +# vim: expandtab:ts=4:sw=4 +import cv2 +import numpy as np +from trackers.strongsort.sort.kalman_filter import KalmanFilter +from collections import deque + + +class TrackState: + """ + Enumeration type for the single target track state. Newly created tracks are + classified as `tentative` until enough evidence has been collected. Then, + the track state is changed to `confirmed`. Tracks that are no longer alive + are classified as `deleted` to mark them for removal from the set of active + tracks. + + """ + + Tentative = 1 + Confirmed = 2 + Deleted = 3 + + +class Track: + """ + A single target track with state space `(x, y, a, h)` and associated + velocities, where `(x, y)` is the center of the bounding box, `a` is the + aspect ratio and `h` is the height. + + Parameters + ---------- + mean : ndarray + Mean vector of the initial state distribution. + covariance : ndarray + Covariance matrix of the initial state distribution. + track_id : int + A unique track identifier. + n_init : int + Number of consecutive detections before the track is confirmed. The + track state is set to `Deleted` if a miss occurs within the first + `n_init` frames. + max_age : int + The maximum number of consecutive misses before the track state is + set to `Deleted`. + feature : Optional[ndarray] + Feature vector of the detection this track originates from. If not None, + this feature is added to the `features` cache. + + Attributes + ---------- + mean : ndarray + Mean vector of the initial state distribution. + covariance : ndarray + Covariance matrix of the initial state distribution. + track_id : int + A unique track identifier. + hits : int + Total number of measurement updates. + age : int + Total number of frames since first occurrence. + time_since_update : int + Total number of frames since last measurement update. + state : TrackState + The current track state. + features : List[ndarray] + A cache of features. On each measurement update, the associated feature + vector is added to this list. 
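+ # NOTE (illustrative sketch, not part of the original file; `det_xyah` and
+ # `feat` below are placeholder names): a track starts Tentative, is Confirmed
+ # once `hits` reaches `n_init`, and is Deleted after more than `max_age`
+ # consecutive missed frames:
+ #
+ #     track = Track(det_xyah, track_id=1, class_id=2, conf=0.9,
+ #                   n_init=3, max_age=30, ema_alpha=0.9, feature=feat)
+ #     track.is_tentative()      # True right after creation (hits == 1)
+ #     # ...two more matched updates -> hits == 3 -> is_confirmed() is True
+ #     # ...more than max_age frames without a match -> mark_missed() sets Deleted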
+ + """ + + def __init__(self, detection, track_id, class_id, conf, n_init, max_age, ema_alpha, + feature=None): + self.track_id = track_id + self.class_id = int(class_id) + self.hits = 1 + self.age = 1 + self.time_since_update = 0 + self.max_num_updates_wo_assignment = 7 + self.updates_wo_assignment = 0 + self.ema_alpha = ema_alpha + + self.state = TrackState.Tentative + self.features = [] + if feature is not None: + feature /= np.linalg.norm(feature) + self.features.append(feature) + + self.conf = conf + self._n_init = n_init + self._max_age = max_age + + self.kf = KalmanFilter() + self.mean, self.covariance = self.kf.initiate(detection) + + # Initializing trajectory queue + self.q = deque(maxlen=25) + + def to_tlwh(self): + """Get current position in bounding box format `(top left x, top left y, + width, height)`. + + Returns + ------- + ndarray + The bounding box. + + """ + ret = self.mean[:4].copy() + ret[2] *= ret[3] + ret[:2] -= ret[2:] / 2 + return ret + + def to_tlbr(self): + """Get kf estimated current position in bounding box format `(min x, miny, max x, + max y)`. + + Returns + ------- + ndarray + The predicted kf bounding box. + + """ + ret = self.to_tlwh() + ret[2:] = ret[:2] + ret[2:] + return ret + + + def ECC(self, src, dst, warp_mode = cv2.MOTION_EUCLIDEAN, eps = 1e-5, + max_iter = 100, scale = 0.1, align = False): + """Compute the warp matrix from src to dst. + Parameters + ---------- + src : ndarray + An NxM matrix of source img(BGR or Gray), it must be the same format as dst. + dst : ndarray + An NxM matrix of target img(BGR or Gray). + warp_mode: flags of opencv + translation: cv2.MOTION_TRANSLATION + rotated and shifted: cv2.MOTION_EUCLIDEAN + affine(shift,rotated,shear): cv2.MOTION_AFFINE + homography(3d): cv2.MOTION_HOMOGRAPHY + eps: float + the threshold of the increment in the correlation coefficient between two iterations + max_iter: int + the number of iterations. + scale: float or [int, int] + scale_ratio: float + scale_size: [W, H] + align: bool + whether to warp affine or perspective transforms to the source image + Returns + ------- + warp matrix : ndarray + Returns the warp matrix from src to dst. 
+ if motion models is homography, the warp matrix will be 3x3, otherwise 2x3 + src_aligned: ndarray + aligned source image of gray + """ + + # BGR2GRAY + if src.ndim == 3: + # Convert images to grayscale + src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) + dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY) + + # make the imgs smaller to speed up + if scale is not None: + if isinstance(scale, float) or isinstance(scale, int): + if scale != 1: + src_r = cv2.resize(src, (0, 0), fx = scale, fy = scale,interpolation = cv2.INTER_LINEAR) + dst_r = cv2.resize(dst, (0, 0), fx = scale, fy = scale,interpolation = cv2.INTER_LINEAR) + scale = [scale, scale] + else: + src_r, dst_r = src, dst + scale = None + else: + if scale[0] != src.shape[1] and scale[1] != src.shape[0]: + src_r = cv2.resize(src, (scale[0], scale[1]), interpolation = cv2.INTER_LINEAR) + dst_r = cv2.resize(dst, (scale[0], scale[1]), interpolation=cv2.INTER_LINEAR) + scale = [scale[0] / src.shape[1], scale[1] / src.shape[0]] + else: + src_r, dst_r = src, dst + scale = None + else: + src_r, dst_r = src, dst + + # Define 2x3 or 3x3 matrices and initialize the matrix to identity + if warp_mode == cv2.MOTION_HOMOGRAPHY : + warp_matrix = np.eye(3, 3, dtype=np.float32) + else : + warp_matrix = np.eye(2, 3, dtype=np.float32) + + # Define termination criteria + criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, eps) + + # Run the ECC algorithm. The results are stored in warp_matrix. + try: + (cc, warp_matrix) = cv2.findTransformECC (src_r, dst_r, warp_matrix, warp_mode, criteria, None, 1) + except cv2.error as e: + print('ecc transform failed') + return None, None + + if scale is not None: + warp_matrix[0, 2] = warp_matrix[0, 2] / scale[0] + warp_matrix[1, 2] = warp_matrix[1, 2] / scale[1] + + if align: + sz = src.shape + if warp_mode == cv2.MOTION_HOMOGRAPHY: + # Use warpPerspective for Homography + src_aligned = cv2.warpPerspective(src, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR) + else : + # Use warpAffine for Translation, Euclidean and Affine + src_aligned = cv2.warpAffine(src, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR) + return warp_matrix, src_aligned + else: + return warp_matrix, None + + + def get_matrix(self, matrix): + eye = np.eye(3) + dist = np.linalg.norm(eye - matrix) + if dist < 100: + return matrix + else: + return eye + + def camera_update(self, previous_frame, next_frame): + warp_matrix, src_aligned = self.ECC(previous_frame, next_frame) + if warp_matrix is None and src_aligned is None: + return + [a,b] = warp_matrix + warp_matrix=np.array([a,b,[0,0,1]]) + warp_matrix = warp_matrix.tolist() + matrix = self.get_matrix(warp_matrix) + + x1, y1, x2, y2 = self.to_tlbr() + x1_, y1_, _ = matrix @ np.array([x1, y1, 1]).T + x2_, y2_, _ = matrix @ np.array([x2, y2, 1]).T + w, h = x2_ - x1_, y2_ - y1_ + cx, cy = x1_ + w / 2, y1_ + h / 2 + self.mean[:4] = [cx, cy, w / h, h] + + + def increment_age(self): + self.age += 1 + self.time_since_update += 1 + + def predict(self, kf): + """Propagate the state distribution to the current time step using a + Kalman filter prediction step. + + Parameters + ---------- + kf : kalman_filter.KalmanFilter + The Kalman filter. 
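+ # NOTE (illustrative, not part of the original file): the filter state is
+ # 8-dimensional - (x, y, a, h) plus their velocities - and prediction applies
+ # a constant-velocity model, conceptually:
+ #
+ #     mean[:4] += mean[4:]          # centre/aspect/height advanced by velocity
+ #     covariance = F P F.T + Q      # uncertainty grows by the process noise Q
+ #
+ # so an unmatched track keeps coasting along its last estimated motion.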
+ + """ + self.mean, self.covariance = self.kf.predict(self.mean, self.covariance) + self.age += 1 + self.time_since_update += 1 + + def update_kf(self, bbox, confidence=0.5): + self.updates_wo_assignment = self.updates_wo_assignment + 1 + self.mean, self.covariance = self.kf.update(self.mean, self.covariance, bbox, confidence) + tlbr = self.to_tlbr() + x_c = int((tlbr[0] + tlbr[2]) / 2) + y_c = int((tlbr[1] + tlbr[3]) / 2) + self.q.append(('predupdate', (x_c, y_c))) + + def update(self, detection, class_id, conf): + """Perform Kalman filter measurement update step and update the feature + cache. + Parameters + ---------- + detection : Detection + The associated detection. + """ + self.conf = conf + self.class_id = class_id.int() + self.mean, self.covariance = self.kf.update(self.mean, self.covariance, detection.to_xyah(), detection.confidence) + + feature = detection.feature / np.linalg.norm(detection.feature) + + smooth_feat = self.ema_alpha * self.features[-1] + (1 - self.ema_alpha) * feature + smooth_feat /= np.linalg.norm(smooth_feat) + self.features = [smooth_feat] + + self.hits += 1 + self.time_since_update = 0 + if self.state == TrackState.Tentative and self.hits >= self._n_init: + self.state = TrackState.Confirmed + + tlbr = self.to_tlbr() + x_c = int((tlbr[0] + tlbr[2]) / 2) + y_c = int((tlbr[1] + tlbr[3]) / 2) + self.q.append(('observationupdate', (x_c, y_c))) + + def mark_missed(self): + """Mark this track as missed (no association at the current time step). + """ + if self.state == TrackState.Tentative: + self.state = TrackState.Deleted + elif self.time_since_update > self._max_age: + self.state = TrackState.Deleted + + def is_tentative(self): + """Returns True if this track is tentative (unconfirmed). + """ + return self.state == TrackState.Tentative + + def is_confirmed(self): + """Returns True if this track is confirmed.""" + return self.state == TrackState.Confirmed + + def is_deleted(self): + """Returns True if this track is dead and should be deleted.""" + return self.state == TrackState.Deleted diff --git a/feeder/trackers/strongsort/sort/tracker.py b/feeder/trackers/strongsort/sort/tracker.py new file mode 100644 index 0000000..d889277 --- /dev/null +++ b/feeder/trackers/strongsort/sort/tracker.py @@ -0,0 +1,192 @@ +# vim: expandtab:ts=4:sw=4 +from __future__ import absolute_import +import numpy as np +from . import kalman_filter +from . import linear_assignment +from . import iou_matching +from . import detection +from .track import Track + + +class Tracker: + """ + This is the multi-target tracker. + Parameters + ---------- + metric : nn_matching.NearestNeighborDistanceMetric + A distance metric for measurement-to-track association. + max_age : int + Maximum number of missed misses before a track is deleted. + n_init : int + Number of consecutive detections before the track is confirmed. The + track state is set to `Deleted` if a miss occurs within the first + `n_init` frames. + Attributes + ---------- + metric : nn_matching.NearestNeighborDistanceMetric + The distance metric used for measurement to track association. + max_age : int + Maximum number of missed misses before a track is deleted. + n_init : int + Number of frames that a track remains in initialization phase. + kf : kalman_filter.KalmanFilter + A Kalman filter to filter target trajectories in image space. + tracks : List[Track] + The list of active tracks at the current time step. 
+ """ + GATING_THRESHOLD = np.sqrt(kalman_filter.chi2inv95[4]) + + def __init__(self, metric, max_iou_dist=0.9, max_age=30, max_unmatched_preds=7, n_init=3, _lambda=0, ema_alpha=0.9, mc_lambda=0.995): + self.metric = metric + self.max_iou_dist = max_iou_dist + self.max_age = max_age + self.n_init = n_init + self._lambda = _lambda + self.ema_alpha = ema_alpha + self.mc_lambda = mc_lambda + self.max_unmatched_preds = max_unmatched_preds + + self.kf = kalman_filter.KalmanFilter() + self.tracks = [] + self._next_id = 1 + + def predict(self): + """Propagate track state distributions one time step forward. + + This function should be called once every time step, before `update`. + """ + for track in self.tracks: + track.predict(self.kf) + + def increment_ages(self): + for track in self.tracks: + track.increment_age() + track.mark_missed() + + def camera_update(self, previous_img, current_img): + for track in self.tracks: + track.camera_update(previous_img, current_img) + + def pred_n_update_all_tracks(self): + """Perform predictions and updates for all tracks by its own predicted state. + + """ + self.predict() + for t in self.tracks: + if self.max_unmatched_preds != 0 and t.updates_wo_assignment < t.max_num_updates_wo_assignment: + bbox = t.to_tlwh() + t.update_kf(detection.to_xyah_ext(bbox)) + + def update(self, detections, classes, confidences): + """Perform measurement update and track management. + + Parameters + ---------- + detections : List[deep_sort.detection.Detection] + A list of detections at the current time step. + + """ + # Run matching cascade. + matches, unmatched_tracks, unmatched_detections = \ + self._match(detections) + + # Update track set. + for track_idx, detection_idx in matches: + self.tracks[track_idx].update( + detections[detection_idx], classes[detection_idx], confidences[detection_idx]) + for track_idx in unmatched_tracks: + self.tracks[track_idx].mark_missed() + if self.max_unmatched_preds != 0 and self.tracks[track_idx].updates_wo_assignment < self.tracks[track_idx].max_num_updates_wo_assignment: + bbox = self.tracks[track_idx].to_tlwh() + self.tracks[track_idx].update_kf(detection.to_xyah_ext(bbox)) + for detection_idx in unmatched_detections: + self._initiate_track(detections[detection_idx], classes[detection_idx].item(), confidences[detection_idx].item()) + self.tracks = [t for t in self.tracks if not t.is_deleted()] + + # Update distance metric. + active_targets = [t.track_id for t in self.tracks if t.is_confirmed()] + features, targets = [], [] + for track in self.tracks: + if not track.is_confirmed(): + continue + features += track.features + targets += [track.track_id for _ in track.features] + self.metric.partial_fit(np.asarray(features), np.asarray(targets), active_targets) + + def _full_cost_metric(self, tracks, dets, track_indices, detection_indices): + """ + This implements the full lambda-based cost-metric. However, in doing so, it disregards + the possibility to gate the position only which is provided by + linear_assignment.gate_cost_matrix(). Instead, I gate by everything. + Note that the Mahalanobis distance is itself an unnormalised metric. Given the cosine + distance being normalised, we employ a quick and dirty normalisation based on the + threshold: that is, we divide the positional-cost by the gating threshold, thus ensuring + that the valid values range 0-1. + Note also that the authors work with the squared distance. I also sqrt this, so that it + is more intuitive in terms of values. 
+ """ + # Compute First the Position-based Cost Matrix + pos_cost = np.empty([len(track_indices), len(detection_indices)]) + msrs = np.asarray([dets[i].to_xyah() for i in detection_indices]) + for row, track_idx in enumerate(track_indices): + pos_cost[row, :] = np.sqrt( + self.kf.gating_distance( + tracks[track_idx].mean, tracks[track_idx].covariance, msrs, False + ) + ) / self.GATING_THRESHOLD + pos_gate = pos_cost > 1.0 + # Now Compute the Appearance-based Cost Matrix + app_cost = self.metric.distance( + np.array([dets[i].feature for i in detection_indices]), + np.array([tracks[i].track_id for i in track_indices]), + ) + app_gate = app_cost > self.metric.matching_threshold + # Now combine and threshold + cost_matrix = self._lambda * pos_cost + (1 - self._lambda) * app_cost + cost_matrix[np.logical_or(pos_gate, app_gate)] = linear_assignment.INFTY_COST + # Return Matrix + return cost_matrix + + def _match(self, detections): + + def gated_metric(tracks, dets, track_indices, detection_indices): + features = np.array([dets[i].feature for i in detection_indices]) + targets = np.array([tracks[i].track_id for i in track_indices]) + cost_matrix = self.metric.distance(features, targets) + cost_matrix = linear_assignment.gate_cost_matrix(cost_matrix, tracks, dets, track_indices, detection_indices, self.mc_lambda) + + return cost_matrix + + # Split track set into confirmed and unconfirmed tracks. + confirmed_tracks = [ + i for i, t in enumerate(self.tracks) if t.is_confirmed()] + unconfirmed_tracks = [ + i for i, t in enumerate(self.tracks) if not t.is_confirmed()] + + # Associate confirmed tracks using appearance features. + matches_a, unmatched_tracks_a, unmatched_detections = \ + linear_assignment.matching_cascade( + gated_metric, self.metric.matching_threshold, self.max_age, + self.tracks, detections, confirmed_tracks) + + # Associate remaining tracks together with unconfirmed tracks using IOU. 
+ iou_track_candidates = unconfirmed_tracks + [ + k for k in unmatched_tracks_a if + self.tracks[k].time_since_update == 1] + unmatched_tracks_a = [ + k for k in unmatched_tracks_a if + self.tracks[k].time_since_update != 1] + matches_b, unmatched_tracks_b, unmatched_detections = \ + linear_assignment.min_cost_matching( + iou_matching.iou_cost, self.max_iou_dist, self.tracks, + detections, iou_track_candidates, unmatched_detections) + + matches = matches_a + matches_b + unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b)) + return matches, unmatched_tracks, unmatched_detections + + def _initiate_track(self, detection, class_id, conf): + self.tracks.append(Track( + detection.to_xyah(), self._next_id, class_id, conf, self.n_init, self.max_age, self.ema_alpha, + detection.feature)) + self._next_id += 1 diff --git a/feeder/trackers/strongsort/strong_sort.py b/feeder/trackers/strongsort/strong_sort.py new file mode 100644 index 0000000..352d2c1 --- /dev/null +++ b/feeder/trackers/strongsort/strong_sort.py @@ -0,0 +1,151 @@ +import numpy as np +import torch +import sys +import cv2 +import gdown +from os.path import exists as file_exists, join +import torchvision.transforms as transforms + +from sort.nn_matching import NearestNeighborDistanceMetric +from sort.detection import Detection +from sort.tracker import Tracker + +from reid_multibackend import ReIDDetectMultiBackend + +from ultralytics.yolo.utils.ops import xyxy2xywh + + +class StrongSORT(object): + def __init__(self, + model_weights, + device, + fp16, + max_dist=0.2, + max_iou_dist=0.7, + max_age=70, + max_unmatched_preds=7, + n_init=3, + nn_budget=100, + mc_lambda=0.995, + ema_alpha=0.9 + ): + + self.model = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16) + + self.max_dist = max_dist + metric = NearestNeighborDistanceMetric( + "cosine", self.max_dist, nn_budget) + self.tracker = Tracker( + metric, max_iou_dist=max_iou_dist, max_age=max_age, n_init=n_init, max_unmatched_preds=max_unmatched_preds, mc_lambda=mc_lambda, ema_alpha=ema_alpha) + + def update(self, dets, ori_img): + + xyxys = dets[:, 0:4] + confs = dets[:, 4] + clss = dets[:, 5] + + classes = clss.numpy() + xywhs = xyxy2xywh(xyxys.numpy()) + confs = confs.numpy() + self.height, self.width = ori_img.shape[:2] + + # generate detections + features = self._get_features(xywhs, ori_img) + bbox_tlwh = self._xywh_to_tlwh(xywhs) + detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate( + confs)] + + # run on non-maximum supression + boxes = np.array([d.tlwh for d in detections]) + scores = np.array([d.confidence for d in detections]) + + # update tracker + self.tracker.predict() + self.tracker.update(detections, clss, confs) + + # output bbox identities + outputs = [] + for track in self.tracker.tracks: + if not track.is_confirmed() or track.time_since_update > 1: + continue + + box = track.to_tlwh() + x1, y1, x2, y2 = self._tlwh_to_xyxy(box) + + track_id = track.track_id + class_id = track.class_id + conf = track.conf + queue = track.q + outputs.append(np.array([x1, y1, x2, y2, track_id, class_id, conf, queue], dtype=object)) + if len(outputs) > 0: + outputs = np.stack(outputs, axis=0) + return outputs + + """ + TODO: + Convert bbox from xc_yc_w_h to xtl_ytl_w_h + Thanks JieChen91@github.com for reporting this bug! 
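+ # NOTE (illustrative, not part of the original file): example of the conversion
+ # implemented below - a centre-format box (xc=50, yc=40, w=20, h=10) becomes the
+ # top-left-format box (40, 35, 20, 10), since x_tl = xc - w/2 and y_tl = yc - h/2
+ # while width and height are unchanged:
+ #
+ #     StrongSORT._xywh_to_tlwh(np.array([[50., 40., 20., 10.]]))
+ #     # -> array([[40., 35., 20., 10.]])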
+ """ + @staticmethod + def _xywh_to_tlwh(bbox_xywh): + if isinstance(bbox_xywh, np.ndarray): + bbox_tlwh = bbox_xywh.copy() + elif isinstance(bbox_xywh, torch.Tensor): + bbox_tlwh = bbox_xywh.clone() + bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2. + bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2. + return bbox_tlwh + + def _xywh_to_xyxy(self, bbox_xywh): + x, y, w, h = bbox_xywh + x1 = max(int(x - w / 2), 0) + x2 = min(int(x + w / 2), self.width - 1) + y1 = max(int(y - h / 2), 0) + y2 = min(int(y + h / 2), self.height - 1) + return x1, y1, x2, y2 + + def _tlwh_to_xyxy(self, bbox_tlwh): + """ + TODO: + Convert bbox from xtl_ytl_w_h to xc_yc_w_h + Thanks JieChen91@github.com for reporting this bug! + """ + x, y, w, h = bbox_tlwh + x1 = max(int(x), 0) + x2 = min(int(x+w), self.width - 1) + y1 = max(int(y), 0) + y2 = min(int(y+h), self.height - 1) + return x1, y1, x2, y2 + + def increment_ages(self): + self.tracker.increment_ages() + + def _xyxy_to_tlwh(self, bbox_xyxy): + x1, y1, x2, y2 = bbox_xyxy + + t = x1 + l = y1 + w = int(x2 - x1) + h = int(y2 - y1) + return t, l, w, h + + def _get_features(self, bbox_xywh, ori_img): + im_crops = [] + for box in bbox_xywh: + x1, y1, x2, y2 = self._xywh_to_xyxy(box) + im = ori_img[y1:y2, x1:x2] + im_crops.append(im) + if im_crops: + features = self.model(im_crops) + else: + features = np.array([]) + return features + + def trajectory(self, im0, q, color): + # Add rectangle to image (PIL-only) + for i, p in enumerate(q): + thickness = int(np.sqrt(float (i + 1)) * 1.5) + if p[0] == 'observationupdate': + cv2.circle(im0, p[1], 2, color=color, thickness=thickness) + else: + cv2.circle(im0, p[1], 2, color=(255,255,255), thickness=thickness) diff --git a/feeder/trackers/strongsort/utils/__init__.py b/feeder/trackers/strongsort/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/feeder/trackers/strongsort/utils/asserts.py b/feeder/trackers/strongsort/utils/asserts.py new file mode 100644 index 0000000..59a73cc --- /dev/null +++ b/feeder/trackers/strongsort/utils/asserts.py @@ -0,0 +1,13 @@ +from os import environ + + +def assert_in(file, files_to_check): + if file not in files_to_check: + raise AssertionError("{} does not exist in the list".format(str(file))) + return True + + +def assert_in_env(check_list: list): + for item in check_list: + assert_in(item, environ.keys()) + return True diff --git a/feeder/trackers/strongsort/utils/draw.py b/feeder/trackers/strongsort/utils/draw.py new file mode 100644 index 0000000..bc7cb53 --- /dev/null +++ b/feeder/trackers/strongsort/utils/draw.py @@ -0,0 +1,36 @@ +import numpy as np +import cv2 + +palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1) + + +def compute_color_for_labels(label): + """ + Simple function that adds fixed color depending on the class + """ + color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette] + return tuple(color) + + +def draw_boxes(img, bbox, identities=None, offset=(0,0)): + for i,box in enumerate(bbox): + x1,y1,x2,y2 = [int(i) for i in box] + x1 += offset[0] + x2 += offset[0] + y1 += offset[1] + y2 += offset[1] + # box text and bar + id = int(identities[i]) if identities is not None else 0 + color = compute_color_for_labels(id) + label = '{}{:d}'.format("", id) + t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2 , 2)[0] + cv2.rectangle(img,(x1, y1),(x2,y2),color,3) + cv2.rectangle(img,(x1, y1),(x1+t_size[0]+3,y1+t_size[1]+4), color,-1) + cv2.putText(img,label,(x1,y1+t_size[1]+4), cv2.FONT_HERSHEY_PLAIN, 2, [255,255,255], 2) + 
return img + + + +if __name__ == '__main__': + for i in range(82): + print(compute_color_for_labels(i)) diff --git a/feeder/trackers/strongsort/utils/evaluation.py b/feeder/trackers/strongsort/utils/evaluation.py new file mode 100644 index 0000000..1001794 --- /dev/null +++ b/feeder/trackers/strongsort/utils/evaluation.py @@ -0,0 +1,103 @@ +import os +import numpy as np +import copy +import motmetrics as mm +mm.lap.default_solver = 'lap' +from utils.io import read_results, unzip_objs + + +class Evaluator(object): + + def __init__(self, data_root, seq_name, data_type): + self.data_root = data_root + self.seq_name = seq_name + self.data_type = data_type + + self.load_annotations() + self.reset_accumulator() + + def load_annotations(self): + assert self.data_type == 'mot' + + gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt') + self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True) + self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True) + + def reset_accumulator(self): + self.acc = mm.MOTAccumulator(auto_id=True) + + def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False): + # results + trk_tlwhs = np.copy(trk_tlwhs) + trk_ids = np.copy(trk_ids) + + # gts + gt_objs = self.gt_frame_dict.get(frame_id, []) + gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2] + + # ignore boxes + ignore_objs = self.gt_ignore_frame_dict.get(frame_id, []) + ignore_tlwhs = unzip_objs(ignore_objs)[0] + + + # remove ignored results + keep = np.ones(len(trk_tlwhs), dtype=bool) + iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5) + if len(iou_distance) > 0: + match_is, match_js = mm.lap.linear_sum_assignment(iou_distance) + match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) + match_ious = iou_distance[match_is, match_js] + + match_js = np.asarray(match_js, dtype=int) + match_js = match_js[np.logical_not(np.isnan(match_ious))] + keep[match_js] = False + trk_tlwhs = trk_tlwhs[keep] + trk_ids = trk_ids[keep] + + # get distance matrix + iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5) + + # acc + self.acc.update(gt_ids, trk_ids, iou_distance) + + if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'): + events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics + else: + events = None + return events + + def eval_file(self, filename): + self.reset_accumulator() + + result_frame_dict = read_results(filename, self.data_type, is_gt=False) + frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys()))) + for frame_id in frames: + trk_objs = result_frame_dict.get(frame_id, []) + trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2] + self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False) + + return self.acc + + @staticmethod + def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')): + names = copy.deepcopy(names) + if metrics is None: + metrics = mm.metrics.motchallenge_metrics + metrics = copy.deepcopy(metrics) + + mh = mm.metrics.create() + summary = mh.compute_many( + accs, + metrics=metrics, + names=names, + generate_overall=True + ) + + return summary + + @staticmethod + def save_summary(summary, filename): + import pandas as pd + writer = pd.ExcelWriter(filename) + summary.to_excel(writer) + writer.save() diff --git a/feeder/trackers/strongsort/utils/io.py b/feeder/trackers/strongsort/utils/io.py new file mode 100644 index 
0000000..2dc9afd --- /dev/null +++ b/feeder/trackers/strongsort/utils/io.py @@ -0,0 +1,133 @@ +import os +from typing import Dict +import numpy as np + +# from utils.log import get_logger + + +def write_results(filename, results, data_type): + if data_type == 'mot': + save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n' + elif data_type == 'kitti': + save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n' + else: + raise ValueError(data_type) + + with open(filename, 'w') as f: + for frame_id, tlwhs, track_ids in results: + if data_type == 'kitti': + frame_id -= 1 + for tlwh, track_id in zip(tlwhs, track_ids): + if track_id < 0: + continue + x1, y1, w, h = tlwh + x2, y2 = x1 + w, y1 + h + line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h) + f.write(line) + + +# def write_results(filename, results_dict: Dict, data_type: str): +# if not filename: +# return +# path = os.path.dirname(filename) +# if not os.path.exists(path): +# os.makedirs(path) + +# if data_type in ('mot', 'mcmot', 'lab'): +# save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n' +# elif data_type == 'kitti': +# save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n' +# else: +# raise ValueError(data_type) + +# with open(filename, 'w') as f: +# for frame_id, frame_data in results_dict.items(): +# if data_type == 'kitti': +# frame_id -= 1 +# for tlwh, track_id in frame_data: +# if track_id < 0: +# continue +# x1, y1, w, h = tlwh +# x2, y2 = x1 + w, y1 + h +# line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0) +# f.write(line) +# logger.info('Save results to {}'.format(filename)) + + +def read_results(filename, data_type: str, is_gt=False, is_ignore=False): + if data_type in ('mot', 'lab'): + read_fun = read_mot_results + else: + raise ValueError('Unknown data type: {}'.format(data_type)) + + return read_fun(filename, is_gt, is_ignore) + + +""" +labels={'ped', ... % 1 +'person_on_vhcl', ... % 2 +'car', ... % 3 +'bicycle', ... % 4 +'mbike', ... % 5 +'non_mot_vhcl', ... % 6 +'static_person', ... % 7 +'distractor', ... % 8 +'occluder', ... % 9 +'occluder_on_grnd', ... %10 +'occluder_full', ... % 11 +'reflection', ... % 12 +'crowd' ... 
% 13 +}; +""" + + +def read_mot_results(filename, is_gt, is_ignore): + valid_labels = {1} + ignore_labels = {2, 7, 8, 12} + results_dict = dict() + if os.path.isfile(filename): + with open(filename, 'r') as f: + for line in f.readlines(): + linelist = line.split(',') + if len(linelist) < 7: + continue + fid = int(linelist[0]) + if fid < 1: + continue + results_dict.setdefault(fid, list()) + + if is_gt: + if 'MOT16-' in filename or 'MOT17-' in filename: + label = int(float(linelist[7])) + mark = int(float(linelist[6])) + if mark == 0 or label not in valid_labels: + continue + score = 1 + elif is_ignore: + if 'MOT16-' in filename or 'MOT17-' in filename: + label = int(float(linelist[7])) + vis_ratio = float(linelist[8]) + if label not in ignore_labels and vis_ratio >= 0: + continue + else: + continue + score = 1 + else: + score = float(linelist[6]) + + tlwh = tuple(map(float, linelist[2:6])) + target_id = int(linelist[1]) + + results_dict[fid].append((tlwh, target_id, score)) + + return results_dict + + +def unzip_objs(objs): + if len(objs) > 0: + tlwhs, ids, scores = zip(*objs) + else: + tlwhs, ids, scores = [], [], [] + tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) + + return tlwhs, ids, scores \ No newline at end of file diff --git a/feeder/trackers/strongsort/utils/json_logger.py b/feeder/trackers/strongsort/utils/json_logger.py new file mode 100644 index 0000000..0afd0b4 --- /dev/null +++ b/feeder/trackers/strongsort/utils/json_logger.py @@ -0,0 +1,383 @@ +""" +References: + https://medium.com/analytics-vidhya/creating-a-custom-logging-mechanism-for-real-time-object-detection-using-tdd-4ca2cfcd0a2f +""" +import json +from os import makedirs +from os.path import exists, join +from datetime import datetime + + +class JsonMeta(object): + HOURS = 3 + MINUTES = 59 + SECONDS = 59 + PATH_TO_SAVE = 'LOGS' + DEFAULT_FILE_NAME = 'remaining' + + +class BaseJsonLogger(object): + """ + This is the base class that returns __dict__ of its own + it also returns the dicts of objects in the attributes that are list instances + + """ + + def dic(self): + # returns dicts of objects + out = {} + for k, v in self.__dict__.items(): + if hasattr(v, 'dic'): + out[k] = v.dic() + elif isinstance(v, list): + out[k] = self.list(v) + else: + out[k] = v + return out + + @staticmethod + def list(values): + # applies the dic method on items in the list + return [v.dic() if hasattr(v, 'dic') else v for v in values] + + +class Label(BaseJsonLogger): + """ + For each bounding box there are various categories with confidences. Label class keeps track of that information. + """ + + def __init__(self, category: str, confidence: float): + self.category = category + self.confidence = confidence + + +class Bbox(BaseJsonLogger): + """ + This module stores the information for each frame and use them in JsonParser + Attributes: + labels (list): List of label module. + top (int): + left (int): + width (int): + height (int): + + Args: + bbox_id (float): + top (int): + left (int): + width (int): + height (int): + + References: + Check Label module for better understanding. + + + """ + + def __init__(self, bbox_id, top, left, width, height): + self.labels = [] + self.bbox_id = bbox_id + self.top = top + self.left = left + self.width = width + self.height = height + + def add_label(self, category, confidence): + # adds category and confidence only if top_k is not exceeded. 
+ self.labels.append(Label(category, confidence)) + + def labels_full(self, value): + return len(self.labels) == value + + +class Frame(BaseJsonLogger): + """ + This module stores the information for each frame and use them in JsonParser + Attributes: + timestamp (float): The elapsed time of captured frame + frame_id (int): The frame number of the captured video + bboxes (list of Bbox objects): Stores the list of bbox objects. + + References: + Check Bbox class for better information + + Args: + timestamp (float): + frame_id (int): + + """ + + def __init__(self, frame_id: int, timestamp: float = None): + self.frame_id = frame_id + self.timestamp = timestamp + self.bboxes = [] + + def add_bbox(self, bbox_id: int, top: int, left: int, width: int, height: int): + bboxes_ids = [bbox.bbox_id for bbox in self.bboxes] + if bbox_id not in bboxes_ids: + self.bboxes.append(Bbox(bbox_id, top, left, width, height)) + else: + raise ValueError("Frame with id: {} already has a Bbox with id: {}".format(self.frame_id, bbox_id)) + + def add_label_to_bbox(self, bbox_id: int, category: str, confidence: float): + bboxes = {bbox.id: bbox for bbox in self.bboxes} + if bbox_id in bboxes.keys(): + res = bboxes.get(bbox_id) + res.add_label(category, confidence) + else: + raise ValueError('the bbox with id: {} does not exists!'.format(bbox_id)) + + +class BboxToJsonLogger(BaseJsonLogger): + """ + ُ This module is designed to automate the task of logging jsons. An example json is used + to show the contents of json file shortly + Example: + { + "video_details": { + "frame_width": 1920, + "frame_height": 1080, + "frame_rate": 20, + "video_name": "/home/gpu/codes/MSD/pedestrian_2/project/public/camera1.avi" + }, + "frames": [ + { + "frame_id": 329, + "timestamp": 3365.1254 + "bboxes": [ + { + "labels": [ + { + "category": "pedestrian", + "confidence": 0.9 + } + ], + "bbox_id": 0, + "top": 1257, + "left": 138, + "width": 68, + "height": 109 + } + ] + }], + + Attributes: + frames (dict): It's a dictionary that maps each frame_id to json attributes. + video_details (dict): information about video file. + top_k_labels (int): shows the allowed number of labels + start_time (datetime object): we use it to automate the json output by time. 
+ + Args: + top_k_labels (int): shows the allowed number of labels + + """ + + def __init__(self, top_k_labels: int = 1): + self.frames = {} + self.video_details = self.video_details = dict(frame_width=None, frame_height=None, frame_rate=None, + video_name=None) + self.top_k_labels = top_k_labels + self.start_time = datetime.now() + + def set_top_k(self, value): + self.top_k_labels = value + + def frame_exists(self, frame_id: int) -> bool: + """ + Args: + frame_id (int): + + Returns: + bool: true if frame_id is recognized + """ + return frame_id in self.frames.keys() + + def add_frame(self, frame_id: int, timestamp: float = None) -> None: + """ + Args: + frame_id (int): + timestamp (float): opencv captured frame time property + + Raises: + ValueError: if frame_id would not exist in class frames attribute + + Returns: + None + + """ + if not self.frame_exists(frame_id): + self.frames[frame_id] = Frame(frame_id, timestamp) + else: + raise ValueError("Frame id: {} already exists".format(frame_id)) + + def bbox_exists(self, frame_id: int, bbox_id: int) -> bool: + """ + Args: + frame_id: + bbox_id: + + Returns: + bool: if bbox exists in frame bboxes list + """ + bboxes = [] + if self.frame_exists(frame_id=frame_id): + bboxes = [bbox.bbox_id for bbox in self.frames[frame_id].bboxes] + return bbox_id in bboxes + + def find_bbox(self, frame_id: int, bbox_id: int): + """ + + Args: + frame_id: + bbox_id: + + Returns: + bbox_id (int): + + Raises: + ValueError: if bbox_id does not exist in the bbox list of specific frame. + """ + if not self.bbox_exists(frame_id, bbox_id): + raise ValueError("frame with id: {} does not contain bbox with id: {}".format(frame_id, bbox_id)) + bboxes = {bbox.bbox_id: bbox for bbox in self.frames[frame_id].bboxes} + return bboxes.get(bbox_id) + + def add_bbox_to_frame(self, frame_id: int, bbox_id: int, top: int, left: int, width: int, height: int) -> None: + """ + + Args: + frame_id (int): + bbox_id (int): + top (int): + left (int): + width (int): + height (int): + + Returns: + None + + Raises: + ValueError: if bbox_id already exist in frame information with frame_id + ValueError: if frame_id does not exist in frames attribute + """ + if self.frame_exists(frame_id): + frame = self.frames[frame_id] + if not self.bbox_exists(frame_id, bbox_id): + frame.add_bbox(bbox_id, top, left, width, height) + else: + raise ValueError( + "frame with frame_id: {} already contains the bbox with id: {} ".format(frame_id, bbox_id)) + else: + raise ValueError("frame with frame_id: {} does not exist".format(frame_id)) + + def add_label_to_bbox(self, frame_id: int, bbox_id: int, category: str, confidence: float): + """ + Args: + frame_id: + bbox_id: + category: + confidence: the confidence value returned from yolo detection + + Returns: + None + + Raises: + ValueError: if labels quota (top_k_labels) exceeds. 
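+ # NOTE (illustrative usage sketch, not part of the original file): the typical
+ # logging flow for one detection on one frame mirrors the example JSON in the
+ # class docstring above:
+ #
+ #     logger = BboxToJsonLogger(top_k_labels=1)
+ #     logger.add_frame(frame_id=329, timestamp=3365.1254)
+ #     logger.add_bbox_to_frame(329, bbox_id=0, top=1257, left=138, width=68, height=109)
+ #     logger.add_label_to_bbox(329, 0, category='pedestrian', confidence=0.9)
+ #     logger.json_output('camera1_log')      # writes camera1_log.json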
+ """ + bbox = self.find_bbox(frame_id, bbox_id) + if not bbox.labels_full(self.top_k_labels): + bbox.add_label(category, confidence) + else: + raise ValueError("labels in frame_id: {}, bbox_id: {} is fulled".format(frame_id, bbox_id)) + + def add_video_details(self, frame_width: int = None, frame_height: int = None, frame_rate: int = None, + video_name: str = None): + self.video_details['frame_width'] = frame_width + self.video_details['frame_height'] = frame_height + self.video_details['frame_rate'] = frame_rate + self.video_details['video_name'] = video_name + + def output(self): + output = {'video_details': self.video_details} + result = list(self.frames.values()) + output['frames'] = [item.dic() for item in result] + return output + + def json_output(self, output_name): + """ + Args: + output_name: + + Returns: + None + + Notes: + It creates the json output with `output_name` name. + """ + if not output_name.endswith('.json'): + output_name += '.json' + with open(output_name, 'w') as file: + json.dump(self.output(), file) + file.close() + + def set_start(self): + self.start_time = datetime.now() + + def schedule_output_by_time(self, output_dir=JsonMeta.PATH_TO_SAVE, hours: int = 0, minutes: int = 0, + seconds: int = 60) -> None: + """ + Notes: + Creates folder and then periodically stores the jsons on that address. + + Args: + output_dir (str): the directory where output files will be stored + hours (int): + minutes (int): + seconds (int): + + Returns: + None + + """ + end = datetime.now() + interval = 0 + interval += abs(min([hours, JsonMeta.HOURS]) * 3600) + interval += abs(min([minutes, JsonMeta.MINUTES]) * 60) + interval += abs(min([seconds, JsonMeta.SECONDS])) + diff = (end - self.start_time).seconds + + if diff > interval: + output_name = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '.json' + if not exists(output_dir): + makedirs(output_dir) + output = join(output_dir, output_name) + self.json_output(output_name=output) + self.frames = {} + self.start_time = datetime.now() + + def schedule_output_by_frames(self, frames_quota, frame_counter, output_dir=JsonMeta.PATH_TO_SAVE): + """ + saves as the number of frames quota increases higher. + :param frames_quota: + :param frame_counter: + :param output_dir: + :return: + """ + pass + + def flush(self, output_dir): + """ + Notes: + We use this function to output jsons whenever possible. + like the time that we exit the while loop of opencv. 
+ + Args: + output_dir: + + Returns: + None + + """ + filename = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '-remaining.json' + output = join(output_dir, filename) + self.json_output(output_name=output) diff --git a/feeder/trackers/strongsort/utils/log.py b/feeder/trackers/strongsort/utils/log.py new file mode 100644 index 0000000..0d48757 --- /dev/null +++ b/feeder/trackers/strongsort/utils/log.py @@ -0,0 +1,17 @@ +import logging + + +def get_logger(name='root'): + formatter = logging.Formatter( + # fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s') + fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') + + handler = logging.StreamHandler() + handler.setFormatter(formatter) + + logger = logging.getLogger(name) + logger.setLevel(logging.INFO) + logger.addHandler(handler) + return logger + + diff --git a/feeder/trackers/strongsort/utils/parser.py b/feeder/trackers/strongsort/utils/parser.py new file mode 100644 index 0000000..c29ed84 --- /dev/null +++ b/feeder/trackers/strongsort/utils/parser.py @@ -0,0 +1,41 @@ +import os +import yaml +from easydict import EasyDict as edict + + +class YamlParser(edict): + """ + This is yaml parser based on EasyDict. + """ + + def __init__(self, cfg_dict=None, config_file=None): + if cfg_dict is None: + cfg_dict = {} + + if config_file is not None: + assert(os.path.isfile(config_file)) + with open(config_file, 'r') as fo: + yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader) + cfg_dict.update(yaml_) + + super(YamlParser, self).__init__(cfg_dict) + + def merge_from_file(self, config_file): + with open(config_file, 'r') as fo: + yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader) + self.update(yaml_) + + def merge_from_dict(self, config_dict): + self.update(config_dict) + + +def get_config(config_file=None): + return YamlParser(config_file=config_file) + + +if __name__ == "__main__": + cfg = YamlParser(config_file="../configs/yolov3.yaml") + cfg.merge_from_file("../configs/strong_sort.yaml") + + import ipdb + ipdb.set_trace() diff --git a/feeder/trackers/strongsort/utils/tools.py b/feeder/trackers/strongsort/utils/tools.py new file mode 100644 index 0000000..965fb69 --- /dev/null +++ b/feeder/trackers/strongsort/utils/tools.py @@ -0,0 +1,39 @@ +from functools import wraps +from time import time + + +def is_video(ext: str): + """ + Returns true if ext exists in + allowed_exts for video files. + + Args: + ext: + + Returns: + + """ + + allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp') + return any((ext.endswith(x) for x in allowed_exts)) + + +def tik_tok(func): + """ + keep track of time for each process. 
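+ # NOTE (illustrative usage sketch, not part of the original file; the decorated
+ # function is a placeholder):
+ #
+ #     @tik_tok
+ #     def process_frame(frame):
+ #         ...    # any per-frame work
+ #
+ # Each call then prints the elapsed wall-clock time and the corresponding FPS.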
+ Args: + func: + + Returns: + + """ + @wraps(func) + def _time_it(*args, **kwargs): + start = time() + try: + return func(*args, **kwargs) + finally: + end_ = time() + print("time: {:.03f}s, fps: {:.03f}".format(end_ - start, 1 / (end_ - start))) + + return _time_it diff --git a/feeder/video/sample.mp4 b/feeder/video/sample.mp4 new file mode 100644 index 0000000..d89256d Binary files /dev/null and b/feeder/video/sample.mp4 differ diff --git a/feeder/video/sample.mp4-strongsort.log b/feeder/video/sample.mp4-strongsort.log new file mode 100644 index 0000000..2126c5e --- /dev/null +++ b/feeder/video/sample.mp4-strongsort.log @@ -0,0 +1,612 @@ +{"bbox": [1208, 574, 1312, 640], "id": 1, "cls": 2, "conf": 0.7392573952674866, "frame_idx": 2, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1206, 573, 1311, 639], "id": 1, "cls": 2, "conf": 0.7638279795646667, "frame_idx": 3, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1205, 573, 1310, 640], "id": 1, "cls": 2, "conf": 0.745888352394104, "frame_idx": 4, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1205, 572, 1310, 640], "id": 1, "cls": 2, "conf": 0.7273551821708679, "frame_idx": 5, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1204, 572, 1310, 641], "id": 1, "cls": 2, "conf": 0.7593294382095337, "frame_idx": 6, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1203, 571, 1309, 641], "id": 1, "cls": 2, "conf": 0.7566904425621033, "frame_idx": 7, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1202, 570, 1309, 642], "id": 1, "cls": 2, "conf": 0.7727674245834351, "frame_idx": 8, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1202, 570, 1308, 642], "id": 1, "cls": 2, "conf": 0.7940199375152588, "frame_idx": 9, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1200, 570, 1308, 642], "id": 1, "cls": 2, "conf": 0.7740529179573059, "frame_idx": 10, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1200, 570, 1308, 642], "id": 1, "cls": 2, "conf": 0.7652700543403625, "frame_idx": 11, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1201, 571, 1307, 642], "id": 1, "cls": 2, "conf": 0.8012721538543701, "frame_idx": 12, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1200, 570, 1309, 642], "id": 1, "cls": 2, "conf": 0.7976530194282532, "frame_idx": 13, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1199, 569, 1311, 643], "id": 1, "cls": 2, "conf": 0.812846302986145, "frame_idx": 14, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1198, 570, 1310, 643], "id": 1, "cls": 2, "conf": 0.8232163190841675, "frame_idx": 15, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1194, 569, 1309, 644], "id": 1, "cls": 2, "conf": 0.8198840022087097, "frame_idx": 16, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1195, 569, 1306, 643], "id": 1, "cls": 2, "conf": 0.7693840861320496, "frame_idx": 17, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1193, 569, 1305, 645], "id": 1, "cls": 2, "conf": 0.7881284356117249, "frame_idx": 18, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1192, 570, 1305, 645], "id": 1, "cls": 2, "conf": 0.8157638311386108, "frame_idx": 19, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1192, 570, 1305, 644], "id": 1, "cls": 2, "conf": 0.8246914744377136, "frame_idx": 20, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1190, 569, 1305, 645], "id": 1, "cls": 2, "conf": 0.828994631767273, 
"frame_idx": 21, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1190, 569, 1304, 644], "id": 1, "cls": 2, "conf": 0.8013927936553955, "frame_idx": 22, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1190, 568, 1303, 644], "id": 1, "cls": 2, "conf": 0.8276790380477905, "frame_idx": 23, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1188, 568, 1304, 645], "id": 1, "cls": 2, "conf": 0.8594380021095276, "frame_idx": 24, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1186, 568, 1304, 645], "id": 1, "cls": 2, "conf": 0.8706213235855103, "frame_idx": 25, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1187, 568, 1303, 644], "id": 1, "cls": 2, "conf": 0.8731331825256348, "frame_idx": 26, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1182, 568, 1303, 645], "id": 1, "cls": 2, "conf": 0.87749844789505, "frame_idx": 27, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1182, 569, 1302, 645], "id": 1, "cls": 2, "conf": 0.8746338486671448, "frame_idx": 28, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1181, 568, 1303, 646], "id": 1, "cls": 2, "conf": 0.8688514828681946, "frame_idx": 29, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1180, 569, 1301, 646], "id": 1, "cls": 2, "conf": 0.8689095973968506, "frame_idx": 30, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1179, 568, 1302, 647], "id": 1, "cls": 2, "conf": 0.8720865249633789, "frame_idx": 31, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1178, 568, 1301, 647], "id": 1, "cls": 2, "conf": 0.8609508275985718, "frame_idx": 32, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1177, 568, 1300, 647], "id": 1, "cls": 2, "conf": 0.8541733026504517, "frame_idx": 33, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1178, 569, 1299, 648], "id": 1, "cls": 2, "conf": 0.8305150270462036, "frame_idx": 34, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1177, 569, 1297, 647], "id": 1, "cls": 2, "conf": 0.8163544535636902, "frame_idx": 35, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1175, 568, 1298, 648], "id": 1, "cls": 2, "conf": 0.8103095293045044, "frame_idx": 36, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1174, 568, 1297, 648], "id": 1, "cls": 2, "conf": 0.8175411820411682, "frame_idx": 37, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1171, 569, 1297, 648], "id": 1, "cls": 2, "conf": 0.8210935592651367, "frame_idx": 38, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1171, 568, 1295, 648], "id": 1, "cls": 2, "conf": 0.8320956826210022, "frame_idx": 39, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1167, 568, 1294, 649], "id": 1, "cls": 2, "conf": 0.7790266275405884, "frame_idx": 40, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1166, 568, 1293, 648], "id": 1, "cls": 2, "conf": 0.7791686058044434, "frame_idx": 41, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1166, 568, 1292, 648], "id": 1, "cls": 2, "conf": 0.7617875933647156, "frame_idx": 42, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1164, 567, 1293, 649], "id": 1, "cls": 2, "conf": 0.7618439793586731, "frame_idx": 43, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1162, 567, 1293, 649], "id": 1, "cls": 2, "conf": 0.7654961347579956, "frame_idx": 44, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1161, 567, 1292, 649], "id": 1, "cls": 2, "conf": 
0.7552655935287476, "frame_idx": 45, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1160, 568, 1290, 649], "id": 1, "cls": 2, "conf": 0.7659391164779663, "frame_idx": 46, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 570, 1289, 650], "id": 1, "cls": 2, "conf": 0.7770782709121704, "frame_idx": 47, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1156, 569, 1290, 651], "id": 1, "cls": 2, "conf": 0.776265025138855, "frame_idx": 48, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1156, 568, 1289, 649], "id": 1, "cls": 2, "conf": 0.7784299850463867, "frame_idx": 49, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1153, 567, 1289, 650], "id": 1, "cls": 2, "conf": 0.7925119400024414, "frame_idx": 50, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1154, 568, 1290, 651], "id": 1, "cls": 2, "conf": 0.7904253005981445, "frame_idx": 51, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1152, 569, 1291, 651], "id": 1, "cls": 2, "conf": 0.7655163407325745, "frame_idx": 52, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1151, 569, 1291, 651], "id": 1, "cls": 2, "conf": 0.7518490552902222, "frame_idx": 53, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1149, 569, 1289, 652], "id": 1, "cls": 2, "conf": 0.7494193911552429, "frame_idx": 54, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1147, 570, 1289, 654], "id": 1, "cls": 2, "conf": 0.7891559600830078, "frame_idx": 55, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1145, 570, 1289, 655], "id": 1, "cls": 2, "conf": 0.7939369082450867, "frame_idx": 56, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1142, 569, 1289, 656], "id": 1, "cls": 2, "conf": 0.8129497170448303, "frame_idx": 57, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1141, 570, 1287, 656], "id": 1, "cls": 2, "conf": 0.8340080380439758, "frame_idx": 58, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1140, 569, 1288, 657], "id": 1, "cls": 2, "conf": 0.8393167853355408, "frame_idx": 59, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1141, 570, 1287, 657], "id": 1, "cls": 2, "conf": 0.8389145135879517, "frame_idx": 60, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1139, 569, 1285, 658], "id": 1, "cls": 2, "conf": 0.8342702388763428, "frame_idx": 61, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1138, 570, 1284, 658], "id": 1, "cls": 2, "conf": 0.8394166827201843, "frame_idx": 62, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1135, 569, 1284, 658], "id": 1, "cls": 2, "conf": 0.8471781611442566, "frame_idx": 63, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1131, 568, 1281, 659], "id": 1, "cls": 2, "conf": 0.8232806921005249, "frame_idx": 64, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1129, 568, 1279, 660], "id": 1, "cls": 2, "conf": 0.865515410900116, "frame_idx": 65, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1128, 569, 1282, 661], "id": 1, "cls": 2, "conf": 0.8378810882568359, "frame_idx": 66, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1126, 569, 1282, 661], "id": 1, "cls": 2, "conf": 0.8417340517044067, "frame_idx": 67, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1126, 569, 1281, 661], "id": 1, "cls": 2, "conf": 0.8533654808998108, "frame_idx": 68, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1125, 569, 1281, 660], "id": 
1, "cls": 2, "conf": 0.8475178480148315, "frame_idx": 69, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1123, 569, 1280, 661], "id": 1, "cls": 2, "conf": 0.8625006675720215, "frame_idx": 70, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1120, 568, 1278, 662], "id": 1, "cls": 2, "conf": 0.8567495346069336, "frame_idx": 71, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1120, 569, 1276, 663], "id": 1, "cls": 2, "conf": 0.8443597555160522, "frame_idx": 72, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1118, 568, 1276, 663], "id": 1, "cls": 2, "conf": 0.8420413732528687, "frame_idx": 73, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1115, 567, 1276, 663], "id": 1, "cls": 2, "conf": 0.8549453020095825, "frame_idx": 74, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1115, 567, 1275, 664], "id": 1, "cls": 2, "conf": 0.8429552316665649, "frame_idx": 75, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1112, 567, 1273, 665], "id": 1, "cls": 2, "conf": 0.8485922813415527, "frame_idx": 76, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1111, 567, 1273, 666], "id": 1, "cls": 2, "conf": 0.8699796199798584, "frame_idx": 77, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1109, 565, 1273, 666], "id": 1, "cls": 2, "conf": 0.8823856115341187, "frame_idx": 78, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1107, 564, 1274, 667], "id": 1, "cls": 2, "conf": 0.8547831177711487, "frame_idx": 79, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1106, 565, 1271, 667], "id": 1, "cls": 2, "conf": 0.8556330800056458, "frame_idx": 80, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1105, 564, 1271, 667], "id": 1, "cls": 2, "conf": 0.8522816896438599, "frame_idx": 81, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1103, 562, 1271, 668], "id": 1, "cls": 2, "conf": 0.8402776718139648, "frame_idx": 82, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1098, 561, 1272, 669], "id": 1, "cls": 2, "conf": 0.849938154220581, "frame_idx": 83, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1095, 561, 1272, 669], "id": 1, "cls": 2, "conf": 0.8956634998321533, "frame_idx": 84, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1092, 561, 1272, 670], "id": 1, "cls": 2, "conf": 0.9015648365020752, "frame_idx": 85, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1093, 562, 1271, 670], "id": 1, "cls": 2, "conf": 0.8583961725234985, "frame_idx": 86, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1091, 562, 1271, 672], "id": 1, "cls": 2, "conf": 0.8442841172218323, "frame_idx": 87, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1089, 562, 1270, 672], "id": 1, "cls": 2, "conf": 0.8542094230651855, "frame_idx": 88, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1085, 560, 1267, 672], "id": 1, "cls": 2, "conf": 0.8753722310066223, "frame_idx": 89, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1081, 559, 1266, 673], "id": 1, "cls": 2, "conf": 0.8686020970344543, "frame_idx": 90, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1079, 558, 1266, 673], "id": 1, "cls": 2, "conf": 0.8676679134368896, "frame_idx": 91, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1075, 558, 1265, 674], "id": 1, "cls": 2, "conf": 0.8485567569732666, "frame_idx": 92, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1074, 
558, 1264, 674], "id": 1, "cls": 2, "conf": 0.8431268334388733, "frame_idx": 93, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1074, 557, 1264, 674], "id": 1, "cls": 2, "conf": 0.8517748713493347, "frame_idx": 94, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1070, 559, 1262, 675], "id": 1, "cls": 2, "conf": 0.8630310297012329, "frame_idx": 95, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1068, 559, 1260, 676], "id": 1, "cls": 2, "conf": 0.8517524003982544, "frame_idx": 96, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1065, 557, 1260, 676], "id": 1, "cls": 2, "conf": 0.8309876918792725, "frame_idx": 97, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1062, 558, 1257, 676], "id": 1, "cls": 2, "conf": 0.820047914981842, "frame_idx": 98, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1058, 558, 1258, 680], "id": 1, "cls": 2, "conf": 0.8312326073646545, "frame_idx": 99, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1057, 557, 1255, 681], "id": 1, "cls": 2, "conf": 0.84773850440979, "frame_idx": 100, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1055, 558, 1253, 682], "id": 1, "cls": 2, "conf": 0.8278942108154297, "frame_idx": 101, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1052, 557, 1254, 682], "id": 1, "cls": 2, "conf": 0.8419964909553528, "frame_idx": 102, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1048, 554, 1253, 682], "id": 1, "cls": 2, "conf": 0.8698597550392151, "frame_idx": 103, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1045, 553, 1251, 683], "id": 1, "cls": 2, "conf": 0.8451534509658813, "frame_idx": 104, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1041, 553, 1250, 685], "id": 1, "cls": 2, "conf": 0.8478474617004395, "frame_idx": 105, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1037, 552, 1250, 685], "id": 1, "cls": 2, "conf": 0.8371977210044861, "frame_idx": 106, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1034, 552, 1249, 686], "id": 1, "cls": 2, "conf": 0.8587230443954468, "frame_idx": 107, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1032, 552, 1246, 687], "id": 1, "cls": 2, "conf": 0.8486429452896118, "frame_idx": 108, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1026, 552, 1246, 688], "id": 1, "cls": 2, "conf": 0.8577057123184204, "frame_idx": 109, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1024, 551, 1244, 687], "id": 1, "cls": 2, "conf": 0.847007155418396, "frame_idx": 110, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1020, 551, 1244, 689], "id": 1, "cls": 2, "conf": 0.8531818985939026, "frame_idx": 111, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1014, 550, 1245, 691], "id": 1, "cls": 2, "conf": 0.8777499794960022, "frame_idx": 112, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1011, 550, 1242, 692], "id": 1, "cls": 2, "conf": 0.8970717787742615, "frame_idx": 113, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1009, 550, 1241, 694], "id": 1, "cls": 2, "conf": 0.8887585401535034, "frame_idx": 114, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1007, 549, 1239, 695], "id": 1, "cls": 2, "conf": 0.8952226638793945, "frame_idx": 115, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1002, 549, 1240, 698], "id": 1, "cls": 2, "conf": 0.9019944667816162, "frame_idx": 116, "source": "video/sample.mp4", 
"class_name": "car"} +{"bbox": [1000, 550, 1237, 699], "id": 1, "cls": 2, "conf": 0.8975278735160828, "frame_idx": 117, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [993, 549, 1237, 700], "id": 1, "cls": 2, "conf": 0.9004268646240234, "frame_idx": 118, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [988, 550, 1233, 701], "id": 1, "cls": 2, "conf": 0.8971960544586182, "frame_idx": 119, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [986, 549, 1231, 702], "id": 1, "cls": 2, "conf": 0.8989416360855103, "frame_idx": 120, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [980, 548, 1229, 704], "id": 1, "cls": 2, "conf": 0.889881432056427, "frame_idx": 121, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [975, 548, 1228, 708], "id": 1, "cls": 2, "conf": 0.8943332433700562, "frame_idx": 122, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [971, 548, 1228, 710], "id": 1, "cls": 2, "conf": 0.898472785949707, "frame_idx": 123, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [967, 547, 1226, 712], "id": 1, "cls": 2, "conf": 0.8931097388267517, "frame_idx": 124, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [963, 546, 1225, 713], "id": 1, "cls": 2, "conf": 0.8915606141090393, "frame_idx": 125, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [959, 546, 1223, 715], "id": 1, "cls": 2, "conf": 0.8841129541397095, "frame_idx": 126, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [955, 546, 1223, 717], "id": 1, "cls": 2, "conf": 0.850002646446228, "frame_idx": 127, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [950, 545, 1221, 718], "id": 1, "cls": 2, "conf": 0.8723787069320679, "frame_idx": 128, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [942, 544, 1220, 719], "id": 1, "cls": 2, "conf": 0.8795301914215088, "frame_idx": 129, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [940, 544, 1217, 720], "id": 1, "cls": 2, "conf": 0.8854840993881226, "frame_idx": 130, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [932, 543, 1217, 722], "id": 1, "cls": 2, "conf": 0.8812260031700134, "frame_idx": 131, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [927, 544, 1217, 725], "id": 1, "cls": 2, "conf": 0.8683909773826599, "frame_idx": 132, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [918, 543, 1216, 727], "id": 1, "cls": 2, "conf": 0.853493869304657, "frame_idx": 133, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [914, 543, 1214, 728], "id": 1, "cls": 2, "conf": 0.8531240224838257, "frame_idx": 134, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [908, 543, 1213, 730], "id": 1, "cls": 2, "conf": 0.8651628494262695, "frame_idx": 135, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [902, 542, 1209, 732], "id": 1, "cls": 2, "conf": 0.8718039989471436, "frame_idx": 136, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [894, 541, 1208, 735], "id": 1, "cls": 2, "conf": 0.848781943321228, "frame_idx": 137, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [888, 541, 1206, 736], "id": 1, "cls": 2, "conf": 0.8739963173866272, "frame_idx": 138, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [881, 541, 1204, 737], "id": 1, "cls": 2, "conf": 0.8722886443138123, "frame_idx": 139, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [872, 539, 1203, 738], "id": 1, "cls": 2, "conf": 0.8997212052345276, "frame_idx": 140, "source": 
"video/sample.mp4", "class_name": "car"} +{"bbox": [866, 539, 1200, 739], "id": 1, "cls": 2, "conf": 0.8821484446525574, "frame_idx": 141, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [860, 538, 1198, 744], "id": 1, "cls": 2, "conf": 0.8928354978561401, "frame_idx": 142, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [852, 536, 1197, 746], "id": 1, "cls": 2, "conf": 0.8943573832511902, "frame_idx": 143, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [843, 537, 1195, 748], "id": 1, "cls": 2, "conf": 0.8848525285720825, "frame_idx": 144, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [835, 536, 1194, 749], "id": 1, "cls": 2, "conf": 0.8749076724052429, "frame_idx": 145, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [826, 536, 1190, 751], "id": 1, "cls": 2, "conf": 0.8655844330787659, "frame_idx": 146, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [818, 538, 1186, 757], "id": 1, "cls": 2, "conf": 0.8978791236877441, "frame_idx": 147, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [810, 536, 1184, 759], "id": 1, "cls": 2, "conf": 0.9050822257995605, "frame_idx": 148, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [801, 533, 1181, 758], "id": 1, "cls": 2, "conf": 0.9211980104446411, "frame_idx": 149, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [791, 532, 1180, 762], "id": 1, "cls": 2, "conf": 0.9195648431777954, "frame_idx": 150, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [781, 530, 1177, 770], "id": 1, "cls": 2, "conf": 0.9223189353942871, "frame_idx": 151, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [769, 530, 1177, 772], "id": 1, "cls": 2, "conf": 0.9049766063690186, "frame_idx": 152, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [760, 528, 1175, 772], "id": 1, "cls": 2, "conf": 0.9004610776901245, "frame_idx": 153, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [749, 528, 1174, 776], "id": 1, "cls": 2, "conf": 0.9073677062988281, "frame_idx": 154, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [738, 526, 1171, 783], "id": 1, "cls": 2, "conf": 0.9120516777038574, "frame_idx": 155, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1254, 566, 1426, 643], "id": 2, "cls": 2, "conf": 0.702964186668396, "frame_idx": 155, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [725, 526, 1170, 785], "id": 1, "cls": 2, "conf": 0.9064223766326904, "frame_idx": 156, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1253, 568, 1422, 643], "id": 2, "cls": 2, "conf": 0.7038942575454712, "frame_idx": 156, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [712, 527, 1165, 789], "id": 1, "cls": 2, "conf": 0.9063256978988647, "frame_idx": 157, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1252, 568, 1421, 643], "id": 2, "cls": 2, "conf": 0.7038942575454712, "frame_idx": 157, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [699, 524, 1160, 793], "id": 1, "cls": 2, "conf": 0.8908406496047974, "frame_idx": 158, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [685, 524, 1159, 795], "id": 1, "cls": 2, "conf": 0.8844937682151794, "frame_idx": 159, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [673, 525, 1156, 799], "id": 1, "cls": 2, "conf": 0.8897193670272827, "frame_idx": 160, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [659, 524, 1152, 802], "id": 1, "cls": 2, "conf": 0.905559241771698, 
"frame_idx": 161, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [644, 522, 1149, 809], "id": 1, "cls": 2, "conf": 0.89296555519104, "frame_idx": 162, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [628, 522, 1146, 820], "id": 1, "cls": 2, "conf": 0.8848194479942322, "frame_idx": 163, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1242, 567, 1420, 642], "id": 2, "cls": 2, "conf": 0.717244029045105, "frame_idx": 163, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [611, 519, 1145, 821], "id": 1, "cls": 2, "conf": 0.9121138453483582, "frame_idx": 164, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1242, 568, 1418, 643], "id": 2, "cls": 2, "conf": 0.733672559261322, "frame_idx": 164, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [594, 520, 1141, 827], "id": 1, "cls": 2, "conf": 0.890241801738739, "frame_idx": 165, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1245, 569, 1416, 642], "id": 2, "cls": 2, "conf": 0.7150111794471741, "frame_idx": 165, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [574, 519, 1136, 832], "id": 1, "cls": 2, "conf": 0.9198168516159058, "frame_idx": 166, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1244, 569, 1415, 642], "id": 2, "cls": 2, "conf": 0.7150111794471741, "frame_idx": 166, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [555, 518, 1133, 839], "id": 1, "cls": 2, "conf": 0.9146777987480164, "frame_idx": 167, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [537, 515, 1129, 845], "id": 1, "cls": 2, "conf": 0.9021809101104736, "frame_idx": 168, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [516, 513, 1127, 854], "id": 1, "cls": 2, "conf": 0.9111503958702087, "frame_idx": 169, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [495, 510, 1126, 863], "id": 1, "cls": 2, "conf": 0.9124228954315186, "frame_idx": 170, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [471, 512, 1121, 872], "id": 1, "cls": 2, "conf": 0.9291900396347046, "frame_idx": 171, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [447, 509, 1116, 875], "id": 1, "cls": 2, "conf": 0.8657183051109314, "frame_idx": 172, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [423, 506, 1111, 881], "id": 1, "cls": 2, "conf": 0.8687337636947632, "frame_idx": 173, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [393, 505, 1105, 893], "id": 1, "cls": 2, "conf": 0.9182578921318054, "frame_idx": 174, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [368, 503, 1101, 899], "id": 1, "cls": 2, "conf": 0.9256529808044434, "frame_idx": 175, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [340, 502, 1096, 912], "id": 1, "cls": 2, "conf": 0.9282132983207703, "frame_idx": 176, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [303, 500, 1091, 924], "id": 1, "cls": 2, "conf": 0.9329380989074707, "frame_idx": 177, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [274, 499, 1087, 937], "id": 1, "cls": 2, "conf": 0.9455896019935608, "frame_idx": 178, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [233, 498, 1083, 946], "id": 1, "cls": 2, "conf": 0.9385244846343994, "frame_idx": 179, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [194, 496, 1077, 960], "id": 1, "cls": 2, "conf": 0.9393031001091003, "frame_idx": 180, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [153, 495, 1076, 972], "id": 1, "cls": 2, "conf": 
0.9307792782783508, "frame_idx": 181, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [110, 492, 1067, 988], "id": 1, "cls": 2, "conf": 0.9395390748977661, "frame_idx": 182, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [57, 493, 1060, 1008], "id": 1, "cls": 2, "conf": 0.9405025243759155, "frame_idx": 183, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [4, 492, 1053, 1029], "id": 1, "cls": 2, "conf": 0.9425285458564758, "frame_idx": 184, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 490, 1047, 1043], "id": 1, "cls": 2, "conf": 0.9343565106391907, "frame_idx": 185, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 488, 1043, 1061], "id": 1, "cls": 2, "conf": 0.9273869395256042, "frame_idx": 186, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 484, 1035, 1071], "id": 1, "cls": 2, "conf": 0.9321094751358032, "frame_idx": 187, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 475, 1030, 1071], "id": 1, "cls": 2, "conf": 0.9317752122879028, "frame_idx": 188, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 473, 1025, 1073], "id": 1, "cls": 2, "conf": 0.9486481547355652, "frame_idx": 189, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1210, 567, 1396, 640], "id": 2, "cls": 2, "conf": 0.7311104536056519, "frame_idx": 189, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 472, 1016, 1073], "id": 1, "cls": 2, "conf": 0.952238917350769, "frame_idx": 190, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1211, 569, 1397, 642], "id": 2, "cls": 2, "conf": 0.7499367594718933, "frame_idx": 190, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 463, 1008, 1070], "id": 1, "cls": 2, "conf": 0.9457194209098816, "frame_idx": 191, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1219, 570, 1396, 641], "id": 2, "cls": 2, "conf": 0.7276124954223633, "frame_idx": 191, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 454, 1001, 1071], "id": 1, "cls": 2, "conf": 0.9511743187904358, "frame_idx": 192, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1218, 570, 1396, 641], "id": 2, "cls": 2, "conf": 0.7206576466560364, "frame_idx": 192, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 450, 994, 1069], "id": 1, "cls": 2, "conf": 0.9420279264450073, "frame_idx": 193, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1214, 570, 1395, 642], "id": 2, "cls": 2, "conf": 0.7134021520614624, "frame_idx": 193, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 446, 985, 1067], "id": 1, "cls": 2, "conf": 0.9500812292098999, "frame_idx": 194, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1215, 570, 1393, 642], "id": 2, "cls": 2, "conf": 0.7069892287254333, "frame_idx": 194, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 442, 976, 1066], "id": 1, "cls": 2, "conf": 0.9406448006629944, "frame_idx": 195, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1203, 568, 1391, 642], "id": 2, "cls": 2, "conf": 0.7376792430877686, "frame_idx": 195, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 441, 968, 1069], "id": 1, "cls": 2, "conf": 0.9537635445594788, "frame_idx": 196, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1202, 567, 1391, 642], "id": 2, "cls": 2, "conf": 0.7550773024559021, "frame_idx": 196, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 440, 960, 1069], "id": 1, "cls": 
2, "conf": 0.9586692452430725, "frame_idx": 197, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1200, 566, 1392, 642], "id": 2, "cls": 2, "conf": 0.7765669822692871, "frame_idx": 197, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 431, 950, 1069], "id": 1, "cls": 2, "conf": 0.9550426006317139, "frame_idx": 198, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1198, 565, 1393, 643], "id": 2, "cls": 2, "conf": 0.7722377777099609, "frame_idx": 198, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 424, 938, 1065], "id": 1, "cls": 2, "conf": 0.9508339762687683, "frame_idx": 199, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1196, 565, 1392, 643], "id": 2, "cls": 2, "conf": 0.751980185508728, "frame_idx": 199, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 419, 927, 1065], "id": 1, "cls": 2, "conf": 0.9454301595687866, "frame_idx": 200, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1196, 566, 1392, 643], "id": 2, "cls": 2, "conf": 0.7461082935333252, "frame_idx": 200, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 413, 916, 1065], "id": 1, "cls": 2, "conf": 0.957693874835968, "frame_idx": 201, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1194, 565, 1392, 644], "id": 2, "cls": 2, "conf": 0.7643528580665588, "frame_idx": 201, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 407, 905, 1065], "id": 1, "cls": 2, "conf": 0.945280134677887, "frame_idx": 202, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1196, 565, 1392, 644], "id": 2, "cls": 2, "conf": 0.7613423466682434, "frame_idx": 202, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 409, 890, 1065], "id": 1, "cls": 2, "conf": 0.9535142183303833, "frame_idx": 203, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1194, 565, 1391, 644], "id": 2, "cls": 2, "conf": 0.7633638978004456, "frame_idx": 203, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 400, 875, 1065], "id": 1, "cls": 2, "conf": 0.9448526501655579, "frame_idx": 204, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1192, 565, 1391, 644], "id": 2, "cls": 2, "conf": 0.7550344467163086, "frame_idx": 204, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 395, 863, 1064], "id": 1, "cls": 2, "conf": 0.9526091814041138, "frame_idx": 205, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1192, 565, 1390, 644], "id": 2, "cls": 2, "conf": 0.7387273907661438, "frame_idx": 205, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 391, 851, 1062], "id": 1, "cls": 2, "conf": 0.9561181664466858, "frame_idx": 206, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1191, 565, 1390, 644], "id": 2, "cls": 2, "conf": 0.7227319478988647, "frame_idx": 206, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 385, 830, 1059], "id": 1, "cls": 2, "conf": 0.9433083534240723, "frame_idx": 207, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1189, 565, 1388, 644], "id": 2, "cls": 2, "conf": 0.703997015953064, "frame_idx": 207, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 369, 812, 1064], "id": 1, "cls": 2, "conf": 0.9332630634307861, "frame_idx": 208, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1192, 566, 1387, 644], "id": 2, "cls": 2, "conf": 0.7098210453987122, "frame_idx": 208, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 364, 792, 1067], "id": 1, 
"cls": 2, "conf": 0.945813775062561, "frame_idx": 209, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1189, 565, 1388, 644], "id": 2, "cls": 2, "conf": 0.7005091905593872, "frame_idx": 209, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 354, 774, 1068], "id": 1, "cls": 2, "conf": 0.9388237595558167, "frame_idx": 210, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1187, 565, 1385, 643], "id": 2, "cls": 2, "conf": 0.7079640030860901, "frame_idx": 210, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 351, 755, 1070], "id": 1, "cls": 2, "conf": 0.9397347569465637, "frame_idx": 211, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1185, 564, 1385, 644], "id": 2, "cls": 2, "conf": 0.7079640030860901, "frame_idx": 211, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 350, 729, 1068], "id": 1, "cls": 2, "conf": 0.949310839176178, "frame_idx": 212, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1183, 564, 1381, 643], "id": 2, "cls": 2, "conf": 0.7306272983551025, "frame_idx": 212, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 350, 703, 1068], "id": 1, "cls": 2, "conf": 0.9424352645874023, "frame_idx": 213, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1183, 564, 1383, 643], "id": 2, "cls": 2, "conf": 0.7504119873046875, "frame_idx": 213, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 350, 679, 1066], "id": 1, "cls": 2, "conf": 0.9429755806922913, "frame_idx": 214, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1181, 565, 1377, 644], "id": 2, "cls": 2, "conf": 0.7851810455322266, "frame_idx": 214, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 354, 650, 1069], "id": 1, "cls": 2, "conf": 0.9048929214477539, "frame_idx": 215, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1181, 565, 1378, 643], "id": 2, "cls": 2, "conf": 0.7938785552978516, "frame_idx": 215, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 378, 620, 1070], "id": 1, "cls": 2, "conf": 0.9180529713630676, "frame_idx": 216, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1182, 566, 1376, 643], "id": 2, "cls": 2, "conf": 0.7817256450653076, "frame_idx": 216, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 395, 588, 1069], "id": 1, "cls": 2, "conf": 0.9412034749984741, "frame_idx": 217, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1182, 565, 1374, 644], "id": 2, "cls": 2, "conf": 0.8047704100608826, "frame_idx": 217, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 427, 551, 1071], "id": 1, "cls": 2, "conf": 0.9319164752960205, "frame_idx": 218, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1182, 565, 1375, 643], "id": 2, "cls": 2, "conf": 0.7836374640464783, "frame_idx": 218, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 453, 510, 1072], "id": 1, "cls": 2, "conf": 0.9232752919197083, "frame_idx": 219, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1181, 566, 1371, 642], "id": 2, "cls": 2, "conf": 0.8103419542312622, "frame_idx": 219, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 503, 467, 1071], "id": 1, "cls": 2, "conf": 0.904760479927063, "frame_idx": 220, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1179, 566, 1371, 642], "id": 2, "cls": 2, "conf": 0.8125634789466858, "frame_idx": 220, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 549, 418, 1070], 
"id": 1, "cls": 2, "conf": 0.9279927611351013, "frame_idx": 221, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1179, 566, 1376, 642], "id": 2, "cls": 2, "conf": 0.8272838592529297, "frame_idx": 221, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 583, 363, 1068], "id": 1, "cls": 2, "conf": 0.9242643117904663, "frame_idx": 222, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1178, 565, 1374, 642], "id": 2, "cls": 2, "conf": 0.8221709132194519, "frame_idx": 222, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 593, 303, 1068], "id": 1, "cls": 2, "conf": 0.9143214821815491, "frame_idx": 223, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1177, 565, 1375, 644], "id": 2, "cls": 2, "conf": 0.8016420602798462, "frame_idx": 223, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 600, 238, 1069], "id": 1, "cls": 2, "conf": 0.8708683252334595, "frame_idx": 224, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1177, 565, 1376, 644], "id": 2, "cls": 2, "conf": 0.7917031645774841, "frame_idx": 224, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 616, 197, 1069], "id": 1, "cls": 2, "conf": 0.8708683252334595, "frame_idx": 225, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1178, 565, 1376, 643], "id": 2, "cls": 2, "conf": 0.78056401014328, "frame_idx": 225, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1177, 564, 1377, 644], "id": 2, "cls": 2, "conf": 0.7785735130310059, "frame_idx": 226, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1176, 565, 1370, 644], "id": 2, "cls": 2, "conf": 0.7929512858390808, "frame_idx": 227, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1174, 564, 1371, 645], "id": 2, "cls": 2, "conf": 0.8178865909576416, "frame_idx": 228, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1173, 564, 1371, 645], "id": 2, "cls": 2, "conf": 0.8109760284423828, "frame_idx": 229, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1173, 565, 1370, 645], "id": 2, "cls": 2, "conf": 0.7563623189926147, "frame_idx": 230, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1174, 565, 1370, 645], "id": 2, "cls": 2, "conf": 0.7083349227905273, "frame_idx": 231, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1173, 565, 1368, 645], "id": 2, "cls": 2, "conf": 0.7430815100669861, "frame_idx": 232, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1172, 564, 1359, 643], "id": 2, "cls": 2, "conf": 0.7816348075866699, "frame_idx": 233, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1171, 565, 1356, 642], "id": 2, "cls": 2, "conf": 0.8003019094467163, "frame_idx": 234, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1171, 563, 1360, 644], "id": 2, "cls": 2, "conf": 0.8223402500152588, "frame_idx": 235, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1169, 562, 1362, 645], "id": 2, "cls": 2, "conf": 0.8306653499603271, "frame_idx": 236, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1168, 562, 1359, 645], "id": 2, "cls": 2, "conf": 0.8245570659637451, "frame_idx": 237, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1170, 563, 1359, 645], "id": 2, "cls": 2, "conf": 0.818155825138092, "frame_idx": 238, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1168, 563, 1360, 645], "id": 2, "cls": 2, "conf": 0.8151793479919434, "frame_idx": 239, "source": "video/sample.mp4", "class_name": "car"} 
+{"bbox": [1166, 564, 1357, 645], "id": 2, "cls": 2, "conf": 0.8082919120788574, "frame_idx": 240, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1165, 564, 1356, 645], "id": 2, "cls": 2, "conf": 0.8219642043113708, "frame_idx": 241, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1164, 564, 1353, 645], "id": 2, "cls": 2, "conf": 0.7999997138977051, "frame_idx": 242, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1164, 564, 1352, 645], "id": 2, "cls": 2, "conf": 0.7364180088043213, "frame_idx": 243, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1165, 565, 1349, 645], "id": 2, "cls": 2, "conf": 0.7858971357345581, "frame_idx": 244, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1164, 564, 1354, 646], "id": 2, "cls": 2, "conf": 0.7886779308319092, "frame_idx": 245, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1164, 564, 1348, 646], "id": 2, "cls": 2, "conf": 0.818172812461853, "frame_idx": 246, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1163, 564, 1348, 646], "id": 2, "cls": 2, "conf": 0.8523472547531128, "frame_idx": 247, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1164, 564, 1348, 645], "id": 2, "cls": 2, "conf": 0.8364881873130798, "frame_idx": 248, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1163, 563, 1346, 646], "id": 2, "cls": 2, "conf": 0.8150932788848877, "frame_idx": 249, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1163, 564, 1346, 646], "id": 2, "cls": 2, "conf": 0.8284506797790527, "frame_idx": 250, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1163, 563, 1347, 645], "id": 2, "cls": 2, "conf": 0.8243890404701233, "frame_idx": 251, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1163, 564, 1344, 646], "id": 2, "cls": 2, "conf": 0.848281741142273, "frame_idx": 252, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1163, 563, 1341, 646], "id": 2, "cls": 2, "conf": 0.8477445840835571, "frame_idx": 253, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1162, 563, 1339, 648], "id": 2, "cls": 2, "conf": 0.8400436043739319, "frame_idx": 254, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1161, 561, 1336, 647], "id": 2, "cls": 2, "conf": 0.7861170768737793, "frame_idx": 255, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1161, 562, 1338, 649], "id": 2, "cls": 2, "conf": 0.8120461702346802, "frame_idx": 256, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1161, 562, 1336, 648], "id": 2, "cls": 2, "conf": 0.7770818471908569, "frame_idx": 257, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1161, 561, 1332, 648], "id": 2, "cls": 2, "conf": 0.7602912187576294, "frame_idx": 258, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 560, 1331, 649], "id": 2, "cls": 2, "conf": 0.7476798295974731, "frame_idx": 259, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 560, 1330, 649], "id": 2, "cls": 2, "conf": 0.7798804640769958, "frame_idx": 260, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 560, 1328, 649], "id": 2, "cls": 2, "conf": 0.7794782519340515, "frame_idx": 261, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 561, 1328, 649], "id": 2, "cls": 2, "conf": 0.7535544037818909, "frame_idx": 262, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 561, 1326, 649], "id": 2, "cls": 2, "conf": 0.7481237649917603, "frame_idx": 263, 
"source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 561, 1325, 647], "id": 2, "cls": 2, "conf": 0.7650920152664185, "frame_idx": 264, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 562, 1324, 647], "id": 2, "cls": 2, "conf": 0.8215755224227905, "frame_idx": 265, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 561, 1324, 647], "id": 2, "cls": 2, "conf": 0.8252439498901367, "frame_idx": 266, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 561, 1323, 648], "id": 2, "cls": 2, "conf": 0.8128286004066467, "frame_idx": 267, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1157, 560, 1323, 649], "id": 2, "cls": 2, "conf": 0.8222718238830566, "frame_idx": 268, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 560, 1323, 649], "id": 2, "cls": 2, "conf": 0.8110289573669434, "frame_idx": 269, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 560, 1323, 649], "id": 2, "cls": 2, "conf": 0.8318296074867249, "frame_idx": 270, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 561, 1321, 649], "id": 2, "cls": 2, "conf": 0.8325403332710266, "frame_idx": 271, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 560, 1323, 650], "id": 2, "cls": 2, "conf": 0.8335207104682922, "frame_idx": 272, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 560, 1321, 650], "id": 2, "cls": 2, "conf": 0.8333126902580261, "frame_idx": 273, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 561, 1320, 650], "id": 2, "cls": 2, "conf": 0.8144757151603699, "frame_idx": 274, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 561, 1319, 650], "id": 2, "cls": 2, "conf": 0.809233546257019, "frame_idx": 275, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1160, 561, 1317, 650], "id": 2, "cls": 2, "conf": 0.7907527685165405, "frame_idx": 276, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 560, 1318, 650], "id": 2, "cls": 2, "conf": 0.8115890026092529, "frame_idx": 277, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 559, 1317, 651], "id": 2, "cls": 2, "conf": 0.7833464741706848, "frame_idx": 278, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 559, 1317, 651], "id": 2, "cls": 2, "conf": 0.7954601645469666, "frame_idx": 279, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 559, 1317, 651], "id": 2, "cls": 2, "conf": 0.774968683719635, "frame_idx": 280, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 559, 1316, 651], "id": 2, "cls": 2, "conf": 0.7699628472328186, "frame_idx": 281, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 559, 1316, 651], "id": 2, "cls": 2, "conf": 0.7739447951316833, "frame_idx": 282, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1159, 559, 1315, 650], "id": 2, "cls": 2, "conf": 0.803051769733429, "frame_idx": 283, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1158, 558, 1312, 652], "id": 2, "cls": 2, "conf": 0.810187041759491, "frame_idx": 284, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1157, 557, 1311, 653], "id": 2, "cls": 2, "conf": 0.8035591840744019, "frame_idx": 285, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1157, 558, 1311, 653], "id": 2, "cls": 2, "conf": 0.8188391923904419, "frame_idx": 286, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1156, 558, 1311, 653], "id": 2, "cls": 2, 
"conf": 0.8180844187736511, "frame_idx": 287, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1157, 559, 1310, 653], "id": 2, "cls": 2, "conf": 0.8250501155853271, "frame_idx": 288, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1156, 559, 1309, 654], "id": 2, "cls": 2, "conf": 0.8236573338508606, "frame_idx": 289, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1156, 559, 1308, 654], "id": 2, "cls": 2, "conf": 0.8105210661888123, "frame_idx": 290, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1157, 560, 1307, 654], "id": 2, "cls": 2, "conf": 0.8106025457382202, "frame_idx": 291, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1155, 560, 1307, 655], "id": 2, "cls": 2, "conf": 0.788083016872406, "frame_idx": 292, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1157, 560, 1305, 654], "id": 2, "cls": 2, "conf": 0.7796603441238403, "frame_idx": 293, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1157, 560, 1304, 655], "id": 2, "cls": 2, "conf": 0.7901594638824463, "frame_idx": 294, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1155, 560, 1305, 656], "id": 2, "cls": 2, "conf": 0.7907295823097229, "frame_idx": 295, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1156, 560, 1303, 655], "id": 2, "cls": 2, "conf": 0.7933876514434814, "frame_idx": 296, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1157, 559, 1301, 655], "id": 2, "cls": 2, "conf": 0.7832263708114624, "frame_idx": 297, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1156, 559, 1301, 656], "id": 2, "cls": 2, "conf": 0.795276403427124, "frame_idx": 298, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1155, 559, 1301, 656], "id": 2, "cls": 2, "conf": 0.8082300424575806, "frame_idx": 299, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1155, 560, 1299, 656], "id": 2, "cls": 2, "conf": 0.7965103387832642, "frame_idx": 300, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1154, 560, 1300, 657], "id": 2, "cls": 2, "conf": 0.8124801516532898, "frame_idx": 301, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1153, 560, 1300, 657], "id": 2, "cls": 2, "conf": 0.8144661784172058, "frame_idx": 302, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1153, 561, 1299, 658], "id": 2, "cls": 2, "conf": 0.8181474208831787, "frame_idx": 303, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1152, 561, 1298, 658], "id": 2, "cls": 2, "conf": 0.8187706470489502, "frame_idx": 304, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1154, 560, 1298, 656], "id": 2, "cls": 2, "conf": 0.8268204927444458, "frame_idx": 305, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1155, 560, 1297, 655], "id": 2, "cls": 2, "conf": 0.8292365074157715, "frame_idx": 306, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1155, 560, 1295, 656], "id": 2, "cls": 2, "conf": 0.8298918008804321, "frame_idx": 307, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1154, 559, 1297, 657], "id": 2, "cls": 2, "conf": 0.8282919526100159, "frame_idx": 308, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1155, 559, 1298, 657], "id": 2, "cls": 2, "conf": 0.8358256816864014, "frame_idx": 309, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1154, 559, 1297, 657], "id": 2, "cls": 2, "conf": 0.8314154744148254, "frame_idx": 310, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": 
[1154, 559, 1297, 657], "id": 2, "cls": 2, "conf": 0.8324777483940125, "frame_idx": 311, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1154, 560, 1294, 657], "id": 2, "cls": 2, "conf": 0.8399393558502197, "frame_idx": 312, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1153, 559, 1295, 658], "id": 2, "cls": 2, "conf": 0.8377672433853149, "frame_idx": 313, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1152, 559, 1294, 658], "id": 2, "cls": 2, "conf": 0.8295931816101074, "frame_idx": 314, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1151, 559, 1293, 658], "id": 2, "cls": 2, "conf": 0.8257358074188232, "frame_idx": 315, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1151, 559, 1292, 658], "id": 2, "cls": 2, "conf": 0.8370307087898254, "frame_idx": 316, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1151, 560, 1291, 658], "id": 2, "cls": 2, "conf": 0.818547785282135, "frame_idx": 317, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1150, 559, 1292, 659], "id": 2, "cls": 2, "conf": 0.7911444306373596, "frame_idx": 318, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1150, 559, 1292, 659], "id": 2, "cls": 2, "conf": 0.7788093686103821, "frame_idx": 319, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1150, 559, 1293, 659], "id": 2, "cls": 2, "conf": 0.7597206830978394, "frame_idx": 320, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1150, 560, 1291, 659], "id": 2, "cls": 2, "conf": 0.7717625498771667, "frame_idx": 321, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1148, 559, 1291, 660], "id": 2, "cls": 2, "conf": 0.7833176255226135, "frame_idx": 322, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1148, 559, 1292, 660], "id": 2, "cls": 2, "conf": 0.7886781096458435, "frame_idx": 323, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1148, 559, 1292, 660], "id": 2, "cls": 2, "conf": 0.7795507311820984, "frame_idx": 324, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1149, 560, 1291, 660], "id": 2, "cls": 2, "conf": 0.7811378240585327, "frame_idx": 325, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1148, 560, 1291, 661], "id": 2, "cls": 2, "conf": 0.7874495387077332, "frame_idx": 326, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1149, 560, 1290, 662], "id": 2, "cls": 2, "conf": 0.8070158958435059, "frame_idx": 327, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1147, 560, 1291, 664], "id": 2, "cls": 2, "conf": 0.8095881342887878, "frame_idx": 328, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1146, 560, 1290, 663], "id": 2, "cls": 2, "conf": 0.8032857775688171, "frame_idx": 329, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1145, 560, 1290, 664], "id": 2, "cls": 2, "conf": 0.826309084892273, "frame_idx": 330, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1145, 560, 1291, 665], "id": 2, "cls": 2, "conf": 0.799944281578064, "frame_idx": 331, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1145, 561, 1290, 665], "id": 2, "cls": 2, "conf": 0.7787960767745972, "frame_idx": 332, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1144, 560, 1290, 665], "id": 2, "cls": 2, "conf": 0.7718071937561035, "frame_idx": 333, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1142, 559, 1291, 666], "id": 2, "cls": 2, "conf": 0.7858945727348328, "frame_idx": 334, "source": 
"video/sample.mp4", "class_name": "car"} +{"bbox": [1143, 559, 1290, 665], "id": 2, "cls": 2, "conf": 0.809407114982605, "frame_idx": 335, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1142, 559, 1290, 666], "id": 2, "cls": 2, "conf": 0.8050354719161987, "frame_idx": 336, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1141, 559, 1289, 666], "id": 2, "cls": 2, "conf": 0.8001269102096558, "frame_idx": 337, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1140, 558, 1289, 667], "id": 2, "cls": 2, "conf": 0.8002896308898926, "frame_idx": 338, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1140, 559, 1288, 667], "id": 2, "cls": 2, "conf": 0.8237987160682678, "frame_idx": 339, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1139, 558, 1289, 667], "id": 2, "cls": 2, "conf": 0.8150033950805664, "frame_idx": 340, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1136, 558, 1291, 667], "id": 2, "cls": 2, "conf": 0.7948818802833557, "frame_idx": 341, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1138, 559, 1289, 668], "id": 2, "cls": 2, "conf": 0.8127124905586243, "frame_idx": 342, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1136, 558, 1290, 668], "id": 2, "cls": 2, "conf": 0.8126155138015747, "frame_idx": 343, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1135, 558, 1290, 668], "id": 2, "cls": 2, "conf": 0.8102937936782837, "frame_idx": 344, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1136, 558, 1290, 668], "id": 2, "cls": 2, "conf": 0.7925915718078613, "frame_idx": 345, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1138, 559, 1288, 669], "id": 2, "cls": 2, "conf": 0.7755674123764038, "frame_idx": 346, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1136, 558, 1288, 670], "id": 2, "cls": 2, "conf": 0.7737069129943848, "frame_idx": 347, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1136, 558, 1286, 669], "id": 2, "cls": 2, "conf": 0.7875550389289856, "frame_idx": 348, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1131, 557, 1286, 670], "id": 2, "cls": 2, "conf": 0.7827519178390503, "frame_idx": 349, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1131, 556, 1286, 670], "id": 2, "cls": 2, "conf": 0.7984418272972107, "frame_idx": 350, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1130, 555, 1287, 671], "id": 2, "cls": 2, "conf": 0.7734009027481079, "frame_idx": 351, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1130, 556, 1285, 671], "id": 2, "cls": 2, "conf": 0.7766426205635071, "frame_idx": 352, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1128, 555, 1286, 672], "id": 2, "cls": 2, "conf": 0.7817273139953613, "frame_idx": 353, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1131, 555, 1284, 671], "id": 2, "cls": 2, "conf": 0.7750544548034668, "frame_idx": 354, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1128, 554, 1287, 672], "id": 2, "cls": 2, "conf": 0.7669058442115784, "frame_idx": 355, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1130, 555, 1284, 672], "id": 2, "cls": 2, "conf": 0.7651919722557068, "frame_idx": 356, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1128, 554, 1283, 672], "id": 2, "cls": 2, "conf": 0.7686755061149597, "frame_idx": 357, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1126, 553, 1284, 673], "id": 2, "cls": 2, "conf": 
0.7569704055786133, "frame_idx": 358, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1126, 554, 1283, 673], "id": 2, "cls": 2, "conf": 0.788491427898407, "frame_idx": 359, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1123, 553, 1285, 673], "id": 2, "cls": 2, "conf": 0.796739935874939, "frame_idx": 360, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1124, 553, 1284, 674], "id": 2, "cls": 2, "conf": 0.7600229382514954, "frame_idx": 361, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1122, 552, 1285, 675], "id": 2, "cls": 2, "conf": 0.7608688473701477, "frame_idx": 362, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1121, 553, 1285, 676], "id": 2, "cls": 2, "conf": 0.7610014081001282, "frame_idx": 363, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1120, 552, 1285, 675], "id": 2, "cls": 2, "conf": 0.7238069772720337, "frame_idx": 364, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1119, 553, 1284, 675], "id": 2, "cls": 2, "conf": 0.789625883102417, "frame_idx": 365, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1118, 552, 1283, 675], "id": 2, "cls": 2, "conf": 0.7700904607772827, "frame_idx": 366, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1117, 552, 1282, 677], "id": 2, "cls": 2, "conf": 0.7024756669998169, "frame_idx": 367, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1116, 550, 1282, 677], "id": 2, "cls": 2, "conf": 0.7285512685775757, "frame_idx": 368, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1115, 549, 1281, 675], "id": 2, "cls": 2, "conf": 0.7092558145523071, "frame_idx": 369, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1113, 549, 1282, 675], "id": 2, "cls": 2, "conf": 0.7147558331489563, "frame_idx": 370, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1114, 548, 1280, 675], "id": 2, "cls": 2, "conf": 0.7318784594535828, "frame_idx": 371, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1112, 549, 1279, 676], "id": 2, "cls": 2, "conf": 0.7841340899467468, "frame_idx": 372, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1113, 549, 1278, 675], "id": 2, "cls": 2, "conf": 0.7626461386680603, "frame_idx": 373, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1111, 550, 1278, 677], "id": 2, "cls": 2, "conf": 0.7657148241996765, "frame_idx": 374, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1108, 550, 1280, 677], "id": 2, "cls": 2, "conf": 0.7782973647117615, "frame_idx": 375, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1108, 550, 1280, 677], "id": 2, "cls": 2, "conf": 0.7754068970680237, "frame_idx": 376, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1107, 551, 1279, 677], "id": 2, "cls": 2, "conf": 0.7901440858840942, "frame_idx": 377, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1105, 550, 1280, 678], "id": 2, "cls": 2, "conf": 0.811150848865509, "frame_idx": 378, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1105, 550, 1279, 678], "id": 2, "cls": 2, "conf": 0.7904564142227173, "frame_idx": 379, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1105, 550, 1278, 678], "id": 2, "cls": 2, "conf": 0.7392836809158325, "frame_idx": 380, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1104, 548, 1279, 678], "id": 2, "cls": 2, "conf": 0.7411684989929199, "frame_idx": 381, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1104, 
551, 1277, 680], "id": 2, "cls": 2, "conf": 0.7404786944389343, "frame_idx": 382, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1102, 550, 1276, 680], "id": 2, "cls": 2, "conf": 0.7326121926307678, "frame_idx": 383, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1102, 550, 1277, 681], "id": 2, "cls": 2, "conf": 0.7641636729240417, "frame_idx": 384, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1101, 549, 1276, 681], "id": 2, "cls": 2, "conf": 0.7742770314216614, "frame_idx": 385, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1099, 549, 1276, 682], "id": 2, "cls": 2, "conf": 0.7556547522544861, "frame_idx": 386, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1098, 548, 1277, 682], "id": 2, "cls": 2, "conf": 0.702316164970398, "frame_idx": 387, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1096, 548, 1275, 683], "id": 2, "cls": 2, "conf": 0.7168530225753784, "frame_idx": 388, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1093, 547, 1273, 684], "id": 2, "cls": 2, "conf": 0.7561923265457153, "frame_idx": 389, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1093, 548, 1275, 684], "id": 2, "cls": 2, "conf": 0.7371773719787598, "frame_idx": 390, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1093, 549, 1275, 684], "id": 2, "cls": 2, "conf": 0.7662423849105835, "frame_idx": 391, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1090, 548, 1276, 685], "id": 2, "cls": 2, "conf": 0.7733460664749146, "frame_idx": 392, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1090, 548, 1275, 684], "id": 2, "cls": 2, "conf": 0.8063229918479919, "frame_idx": 393, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1088, 547, 1275, 685], "id": 2, "cls": 2, "conf": 0.834899365901947, "frame_idx": 394, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1085, 546, 1275, 686], "id": 2, "cls": 2, "conf": 0.8267676830291748, "frame_idx": 395, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1083, 546, 1274, 686], "id": 2, "cls": 2, "conf": 0.8470121622085571, "frame_idx": 396, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1082, 546, 1272, 685], "id": 2, "cls": 2, "conf": 0.8356623649597168, "frame_idx": 397, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1081, 546, 1271, 686], "id": 2, "cls": 2, "conf": 0.8369763493537903, "frame_idx": 398, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1080, 545, 1272, 686], "id": 2, "cls": 2, "conf": 0.8737363219261169, "frame_idx": 399, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1080, 544, 1271, 687], "id": 2, "cls": 2, "conf": 0.8609719276428223, "frame_idx": 400, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1078, 544, 1272, 689], "id": 2, "cls": 2, "conf": 0.83541339635849, "frame_idx": 401, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1078, 545, 1270, 689], "id": 2, "cls": 2, "conf": 0.8013574481010437, "frame_idx": 402, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1075, 544, 1271, 689], "id": 2, "cls": 2, "conf": 0.7798829078674316, "frame_idx": 403, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1074, 543, 1270, 691], "id": 2, "cls": 2, "conf": 0.8236221671104431, "frame_idx": 404, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1070, 543, 1270, 692], "id": 2, "cls": 2, "conf": 0.8620288372039795, "frame_idx": 405, "source": 
"video/sample.mp4", "class_name": "car"} +{"bbox": [1070, 543, 1268, 692], "id": 2, "cls": 2, "conf": 0.8752257227897644, "frame_idx": 406, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1067, 542, 1268, 693], "id": 2, "cls": 2, "conf": 0.870403528213501, "frame_idx": 407, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1066, 542, 1269, 695], "id": 2, "cls": 2, "conf": 0.8699027299880981, "frame_idx": 408, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1062, 541, 1270, 696], "id": 2, "cls": 2, "conf": 0.8874167799949646, "frame_idx": 409, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1061, 541, 1269, 696], "id": 2, "cls": 2, "conf": 0.8754041194915771, "frame_idx": 410, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1060, 540, 1269, 698], "id": 2, "cls": 2, "conf": 0.8649414777755737, "frame_idx": 411, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1057, 539, 1268, 699], "id": 2, "cls": 2, "conf": 0.8912915587425232, "frame_idx": 412, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1056, 539, 1268, 700], "id": 2, "cls": 2, "conf": 0.8944886922836304, "frame_idx": 413, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1055, 539, 1269, 700], "id": 2, "cls": 2, "conf": 0.8907544612884521, "frame_idx": 414, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1054, 540, 1268, 701], "id": 2, "cls": 2, "conf": 0.8559849262237549, "frame_idx": 415, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1053, 541, 1266, 701], "id": 2, "cls": 2, "conf": 0.8329747319221497, "frame_idx": 416, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1051, 540, 1265, 702], "id": 2, "cls": 2, "conf": 0.8382128477096558, "frame_idx": 417, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1049, 540, 1266, 702], "id": 2, "cls": 2, "conf": 0.8805363178253174, "frame_idx": 418, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1046, 539, 1266, 703], "id": 2, "cls": 2, "conf": 0.8715322017669678, "frame_idx": 419, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1045, 539, 1267, 704], "id": 2, "cls": 2, "conf": 0.842781662940979, "frame_idx": 420, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1041, 539, 1268, 706], "id": 2, "cls": 2, "conf": 0.8441018462181091, "frame_idx": 421, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1038, 539, 1266, 708], "id": 2, "cls": 2, "conf": 0.7819275856018066, "frame_idx": 422, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1037, 539, 1264, 708], "id": 2, "cls": 2, "conf": 0.8135506510734558, "frame_idx": 423, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1033, 538, 1264, 710], "id": 2, "cls": 2, "conf": 0.8242059350013733, "frame_idx": 424, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1032, 538, 1265, 710], "id": 2, "cls": 2, "conf": 0.7836756110191345, "frame_idx": 425, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1031, 538, 1264, 710], "id": 2, "cls": 2, "conf": 0.8388970494270325, "frame_idx": 426, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1029, 537, 1264, 711], "id": 2, "cls": 2, "conf": 0.7970230579376221, "frame_idx": 427, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1027, 537, 1265, 711], "id": 2, "cls": 2, "conf": 0.7321099638938904, "frame_idx": 428, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1025, 538, 1265, 712], "id": 2, "cls": 2, "conf": 
0.7343229651451111, "frame_idx": 429, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1020, 536, 1261, 712], "id": 2, "cls": 2, "conf": 0.787158727645874, "frame_idx": 430, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1018, 537, 1259, 713], "id": 2, "cls": 2, "conf": 0.8460677862167358, "frame_idx": 431, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1013, 536, 1261, 714], "id": 2, "cls": 2, "conf": 0.8292366862297058, "frame_idx": 432, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1011, 536, 1259, 716], "id": 2, "cls": 2, "conf": 0.8152600526809692, "frame_idx": 433, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1008, 535, 1258, 718], "id": 2, "cls": 2, "conf": 0.7996748089790344, "frame_idx": 434, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1007, 535, 1255, 719], "id": 2, "cls": 2, "conf": 0.8389233946800232, "frame_idx": 435, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1008, 535, 1253, 720], "id": 2, "cls": 2, "conf": 0.8631499409675598, "frame_idx": 436, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1002, 534, 1254, 721], "id": 2, "cls": 2, "conf": 0.8657373785972595, "frame_idx": 437, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [998, 534, 1253, 721], "id": 2, "cls": 2, "conf": 0.8603703379631042, "frame_idx": 438, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [995, 532, 1253, 722], "id": 2, "cls": 2, "conf": 0.8645334839820862, "frame_idx": 439, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [994, 532, 1252, 723], "id": 2, "cls": 2, "conf": 0.8768425583839417, "frame_idx": 440, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [991, 530, 1254, 724], "id": 2, "cls": 2, "conf": 0.8931466937065125, "frame_idx": 441, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [986, 530, 1256, 725], "id": 2, "cls": 2, "conf": 0.9038722515106201, "frame_idx": 442, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [985, 530, 1253, 725], "id": 2, "cls": 2, "conf": 0.9084876775741577, "frame_idx": 443, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [983, 530, 1251, 727], "id": 2, "cls": 2, "conf": 0.9005601406097412, "frame_idx": 444, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [980, 529, 1252, 729], "id": 2, "cls": 2, "conf": 0.8964847922325134, "frame_idx": 445, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [977, 529, 1251, 730], "id": 2, "cls": 2, "conf": 0.8957618474960327, "frame_idx": 446, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [974, 529, 1248, 731], "id": 2, "cls": 2, "conf": 0.8834296464920044, "frame_idx": 447, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [970, 527, 1246, 732], "id": 2, "cls": 2, "conf": 0.8654475212097168, "frame_idx": 448, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [966, 526, 1248, 734], "id": 2, "cls": 2, "conf": 0.8783361315727234, "frame_idx": 449, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [962, 526, 1245, 734], "id": 2, "cls": 2, "conf": 0.8720850348472595, "frame_idx": 450, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [959, 525, 1247, 735], "id": 2, "cls": 2, "conf": 0.8909793496131897, "frame_idx": 451, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [957, 525, 1244, 737], "id": 2, "cls": 2, "conf": 0.8911501169204712, "frame_idx": 452, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [954, 525, 1243, 
739], "id": 2, "cls": 2, "conf": 0.8941781520843506, "frame_idx": 453, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [948, 524, 1245, 741], "id": 2, "cls": 2, "conf": 0.8771947622299194, "frame_idx": 454, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [943, 524, 1243, 744], "id": 2, "cls": 2, "conf": 0.8804555535316467, "frame_idx": 455, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [940, 523, 1243, 747], "id": 2, "cls": 2, "conf": 0.8785960078239441, "frame_idx": 456, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [934, 522, 1243, 749], "id": 2, "cls": 2, "conf": 0.9005946516990662, "frame_idx": 457, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [931, 521, 1242, 749], "id": 2, "cls": 2, "conf": 0.8925696611404419, "frame_idx": 458, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [928, 521, 1242, 749], "id": 2, "cls": 2, "conf": 0.8925560116767883, "frame_idx": 459, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [925, 522, 1239, 751], "id": 2, "cls": 2, "conf": 0.8871305584907532, "frame_idx": 460, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [917, 523, 1235, 753], "id": 2, "cls": 2, "conf": 0.8800134658813477, "frame_idx": 461, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [913, 523, 1234, 755], "id": 2, "cls": 2, "conf": 0.8769950270652771, "frame_idx": 462, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [912, 522, 1232, 757], "id": 2, "cls": 2, "conf": 0.8771668672561646, "frame_idx": 463, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [907, 521, 1230, 758], "id": 2, "cls": 2, "conf": 0.8780584931373596, "frame_idx": 464, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [902, 520, 1229, 759], "id": 2, "cls": 2, "conf": 0.9009929299354553, "frame_idx": 465, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [892, 520, 1230, 761], "id": 2, "cls": 2, "conf": 0.880210280418396, "frame_idx": 466, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [889, 519, 1227, 762], "id": 2, "cls": 2, "conf": 0.870464026927948, "frame_idx": 467, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [885, 520, 1225, 767], "id": 2, "cls": 2, "conf": 0.9003344774246216, "frame_idx": 468, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [877, 519, 1226, 767], "id": 2, "cls": 2, "conf": 0.920558512210846, "frame_idx": 469, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [875, 519, 1224, 768], "id": 2, "cls": 2, "conf": 0.9045699238777161, "frame_idx": 470, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [868, 518, 1223, 770], "id": 2, "cls": 2, "conf": 0.9074614644050598, "frame_idx": 471, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [864, 517, 1223, 773], "id": 2, "cls": 2, "conf": 0.9183488488197327, "frame_idx": 472, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [857, 516, 1222, 775], "id": 2, "cls": 2, "conf": 0.9148356914520264, "frame_idx": 473, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [853, 516, 1220, 777], "id": 2, "cls": 2, "conf": 0.9280686378479004, "frame_idx": 474, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [841, 514, 1221, 778], "id": 2, "cls": 2, "conf": 0.9198227524757385, "frame_idx": 475, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [838, 513, 1218, 780], "id": 2, "cls": 2, "conf": 0.8942911028862, "frame_idx": 476, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": 
[830, 513, 1218, 782], "id": 2, "cls": 2, "conf": 0.8980481028556824, "frame_idx": 477, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [826, 513, 1213, 787], "id": 2, "cls": 2, "conf": 0.9096649289131165, "frame_idx": 478, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [819, 512, 1212, 793], "id": 2, "cls": 2, "conf": 0.9269362688064575, "frame_idx": 479, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [811, 509, 1213, 794], "id": 2, "cls": 2, "conf": 0.92948979139328, "frame_idx": 480, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [804, 509, 1211, 796], "id": 2, "cls": 2, "conf": 0.9076160788536072, "frame_idx": 481, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [794, 508, 1210, 798], "id": 2, "cls": 2, "conf": 0.9064416289329529, "frame_idx": 482, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [789, 508, 1208, 800], "id": 2, "cls": 2, "conf": 0.9050999879837036, "frame_idx": 483, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [780, 507, 1204, 803], "id": 2, "cls": 2, "conf": 0.9137296080589294, "frame_idx": 484, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [771, 507, 1204, 807], "id": 2, "cls": 2, "conf": 0.9088245630264282, "frame_idx": 485, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [765, 506, 1204, 810], "id": 2, "cls": 2, "conf": 0.9037410020828247, "frame_idx": 486, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [756, 506, 1203, 812], "id": 2, "cls": 2, "conf": 0.9066951870918274, "frame_idx": 487, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [746, 503, 1201, 818], "id": 2, "cls": 2, "conf": 0.914334774017334, "frame_idx": 488, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [735, 503, 1197, 825], "id": 2, "cls": 2, "conf": 0.9123433232307434, "frame_idx": 489, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [725, 502, 1195, 829], "id": 2, "cls": 2, "conf": 0.9094393849372864, "frame_idx": 490, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [717, 498, 1194, 833], "id": 2, "cls": 2, "conf": 0.9276642203330994, "frame_idx": 491, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [705, 499, 1194, 835], "id": 2, "cls": 2, "conf": 0.9282996654510498, "frame_idx": 492, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [696, 498, 1192, 837], "id": 2, "cls": 2, "conf": 0.9298180937767029, "frame_idx": 493, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [684, 496, 1191, 841], "id": 2, "cls": 2, "conf": 0.9258641600608826, "frame_idx": 494, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [673, 496, 1188, 847], "id": 2, "cls": 2, "conf": 0.923974335193634, "frame_idx": 495, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [661, 498, 1186, 856], "id": 2, "cls": 2, "conf": 0.9190512299537659, "frame_idx": 496, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [646, 495, 1183, 859], "id": 2, "cls": 2, "conf": 0.9168910980224609, "frame_idx": 497, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [632, 495, 1183, 868], "id": 2, "cls": 2, "conf": 0.925777018070221, "frame_idx": 498, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [621, 493, 1182, 873], "id": 2, "cls": 2, "conf": 0.9183085560798645, "frame_idx": 499, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [607, 491, 1180, 878], "id": 2, "cls": 2, "conf": 0.9321070909500122, "frame_idx": 500, "source": "video/sample.mp4", "class_name": 
"car"} +{"bbox": [588, 488, 1177, 882], "id": 2, "cls": 2, "conf": 0.9307034611701965, "frame_idx": 501, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [576, 485, 1174, 888], "id": 2, "cls": 2, "conf": 0.9412079453468323, "frame_idx": 502, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [562, 483, 1173, 893], "id": 2, "cls": 2, "conf": 0.9401066303253174, "frame_idx": 503, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [543, 475, 1171, 897], "id": 2, "cls": 2, "conf": 0.9346688389778137, "frame_idx": 504, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [527, 473, 1169, 903], "id": 2, "cls": 2, "conf": 0.9343288540840149, "frame_idx": 505, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [510, 474, 1164, 914], "id": 2, "cls": 2, "conf": 0.9404311180114746, "frame_idx": 506, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [490, 471, 1161, 920], "id": 2, "cls": 2, "conf": 0.9414466619491577, "frame_idx": 507, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [473, 469, 1159, 927], "id": 2, "cls": 2, "conf": 0.9434319138526917, "frame_idx": 508, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [451, 469, 1158, 938], "id": 2, "cls": 2, "conf": 0.9345313906669617, "frame_idx": 509, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [427, 469, 1156, 946], "id": 2, "cls": 2, "conf": 0.9282017946243286, "frame_idx": 510, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [405, 468, 1152, 952], "id": 2, "cls": 2, "conf": 0.9417479038238525, "frame_idx": 511, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [382, 468, 1150, 966], "id": 2, "cls": 2, "conf": 0.9451406598091125, "frame_idx": 512, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [360, 465, 1148, 976], "id": 2, "cls": 2, "conf": 0.9428954720497131, "frame_idx": 513, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [332, 463, 1148, 984], "id": 2, "cls": 2, "conf": 0.9395127892494202, "frame_idx": 514, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [303, 463, 1144, 992], "id": 2, "cls": 2, "conf": 0.9283111095428467, "frame_idx": 515, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [275, 462, 1136, 1003], "id": 2, "cls": 2, "conf": 0.9324305653572083, "frame_idx": 516, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [245, 461, 1131, 1018], "id": 2, "cls": 2, "conf": 0.9247828125953674, "frame_idx": 517, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [208, 453, 1130, 1032], "id": 2, "cls": 2, "conf": 0.9319226741790771, "frame_idx": 518, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [172, 451, 1129, 1045], "id": 2, "cls": 2, "conf": 0.9351807832717896, "frame_idx": 519, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [134, 449, 1125, 1058], "id": 2, "cls": 2, "conf": 0.9390578269958496, "frame_idx": 520, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [91, 445, 1119, 1068], "id": 2, "cls": 2, "conf": 0.947394609451294, "frame_idx": 521, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [46, 443, 1114, 1070], "id": 2, "cls": 2, "conf": 0.9468377232551575, "frame_idx": 522, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [2, 440, 1110, 1072], "id": 2, "cls": 2, "conf": 0.9386428594589233, "frame_idx": 523, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 438, 1105, 1072], "id": 2, "cls": 2, "conf": 0.9346777200698853, "frame_idx": 524, "source": 
"video/sample.mp4", "class_name": "car"} +{"bbox": [0, 435, 1107, 1072], "id": 2, "cls": 2, "conf": 0.9273584485054016, "frame_idx": 525, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 437, 1096, 1071], "id": 2, "cls": 2, "conf": 0.9241657257080078, "frame_idx": 526, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 432, 1095, 1071], "id": 2, "cls": 2, "conf": 0.9355752468109131, "frame_idx": 527, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 428, 1094, 1070], "id": 2, "cls": 2, "conf": 0.9321312308311462, "frame_idx": 528, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 420, 1082, 1073], "id": 2, "cls": 2, "conf": 0.9156169891357422, "frame_idx": 529, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [2, 409, 1077, 1070], "id": 2, "cls": 2, "conf": 0.8867893815040588, "frame_idx": 530, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [2, 388, 1070, 1071], "id": 2, "cls": 2, "conf": 0.9155814051628113, "frame_idx": 531, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 399, 1066, 1072], "id": 2, "cls": 2, "conf": 0.9372450113296509, "frame_idx": 532, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 389, 1057, 1071], "id": 2, "cls": 2, "conf": 0.9160026907920837, "frame_idx": 533, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 390, 1052, 1070], "id": 2, "cls": 2, "conf": 0.9509764313697815, "frame_idx": 534, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 386, 1042, 1070], "id": 2, "cls": 2, "conf": 0.9340437650680542, "frame_idx": 535, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [2, 381, 1038, 1068], "id": 2, "cls": 2, "conf": 0.9404564499855042, "frame_idx": 536, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [2, 375, 1030, 1066], "id": 2, "cls": 2, "conf": 0.9479154348373413, "frame_idx": 537, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [2, 370, 1024, 1067], "id": 2, "cls": 2, "conf": 0.9565911293029785, "frame_idx": 538, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 365, 1016, 1067], "id": 2, "cls": 2, "conf": 0.9608258008956909, "frame_idx": 539, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [2, 357, 1006, 1064], "id": 2, "cls": 2, "conf": 0.9613184332847595, "frame_idx": 540, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [3, 347, 999, 1064], "id": 2, "cls": 2, "conf": 0.9674457311630249, "frame_idx": 541, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 338, 992, 1064], "id": 2, "cls": 2, "conf": 0.97267746925354, "frame_idx": 542, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 328, 983, 1064], "id": 2, "cls": 2, "conf": 0.9624996781349182, "frame_idx": 543, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 319, 972, 1063], "id": 2, "cls": 2, "conf": 0.9598995447158813, "frame_idx": 544, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 307, 959, 1062], "id": 2, "cls": 2, "conf": 0.9514867663383484, "frame_idx": 545, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 292, 948, 1062], "id": 2, "cls": 2, "conf": 0.9584953784942627, "frame_idx": 546, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 279, 935, 1065], "id": 2, "cls": 2, "conf": 0.9569721221923828, "frame_idx": 547, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 270, 927, 1066], "id": 2, "cls": 2, "conf": 0.972572922706604, "frame_idx": 548, "source": 
"video/sample.mp4", "class_name": "car"} +{"bbox": [2, 258, 915, 1066], "id": 2, "cls": 2, "conf": 0.9626525044441223, "frame_idx": 549, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [2, 241, 898, 1064], "id": 2, "cls": 2, "conf": 0.9489137530326843, "frame_idx": 550, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 221, 885, 1065], "id": 2, "cls": 2, "conf": 0.9458200931549072, "frame_idx": 551, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 204, 868, 1066], "id": 2, "cls": 2, "conf": 0.9462317228317261, "frame_idx": 552, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 193, 856, 1066], "id": 2, "cls": 2, "conf": 0.9367963075637817, "frame_idx": 553, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 180, 836, 1067], "id": 2, "cls": 2, "conf": 0.9550886154174805, "frame_idx": 554, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 173, 820, 1068], "id": 2, "cls": 2, "conf": 0.9146677255630493, "frame_idx": 555, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 170, 797, 1066], "id": 2, "cls": 2, "conf": 0.9364038109779358, "frame_idx": 556, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 171, 779, 1067], "id": 2, "cls": 2, "conf": 0.9397339224815369, "frame_idx": 557, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 171, 751, 1068], "id": 2, "cls": 2, "conf": 0.9423396587371826, "frame_idx": 558, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 175, 729, 1067], "id": 2, "cls": 2, "conf": 0.9324960708618164, "frame_idx": 559, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 181, 700, 1066], "id": 2, "cls": 2, "conf": 0.9049985408782959, "frame_idx": 560, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 188, 672, 1067], "id": 2, "cls": 2, "conf": 0.8566305637359619, "frame_idx": 561, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 195, 637, 1067], "id": 2, "cls": 2, "conf": 0.9080706834793091, "frame_idx": 562, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 199, 603, 1068], "id": 2, "cls": 2, "conf": 0.9104960560798645, "frame_idx": 563, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [1, 220, 559, 1063], "id": 2, "cls": 2, "conf": 0.9200505614280701, "frame_idx": 564, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 235, 516, 1067], "id": 2, "cls": 2, "conf": 0.9269247651100159, "frame_idx": 565, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [0, 250, 470, 1065], "id": 2, "cls": 2, "conf": 0.8854379057884216, "frame_idx": 566, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [3, 256, 409, 1066], "id": 2, "cls": 2, "conf": 0.8114883303642273, "frame_idx": 567, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [4, 239, 349, 1070], "id": 2, "cls": 2, "conf": 0.7934050559997559, "frame_idx": 568, "source": "video/sample.mp4", "class_name": "car"} +{"bbox": [7, 409, 283, 1065], "id": 2, "cls": 2, "conf": 0.7185706496238708, "frame_idx": 569, "source": "video/sample.mp4", "class_name": "car"} diff --git a/models/.gitkeep b/models/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/models/bangchakv2.mpta b/models/bangchakv2.mpta new file mode 100644 index 0000000..b79eb1c Binary files /dev/null and b/models/bangchakv2.mpta differ diff --git a/note.txt b/note.txt new file mode 100644 index 0000000..1667b90 --- /dev/null +++ b/note.txt @@ -0,0 +1 @@ +uvicorn app:app --host 0.0.0.0 --port 8000 
--reload \ No newline at end of file diff --git a/output/sample2_detected.png b/output/sample2_detected.png new file mode 100644 index 0000000..8dd9f79 Binary files /dev/null and b/output/sample2_detected.png differ diff --git a/output/sample2_results.txt b/output/sample2_results.txt new file mode 100644 index 0000000..d90003d --- /dev/null +++ b/output/sample2_results.txt @@ -0,0 +1,17 @@ +Model: car_frontal_detection_v1.pt +Image: sample2.png +Confidence threshold: 0.3 +Detections: 2 + +Detection 1: + Class: Car + Confidence: 0.863 + Bounding box: (86.5, 73.4, 825.6, 625.2) + Size: 739.1x551.9 + +Detection 2: + Class: Frontal + Confidence: 0.504 + Bounding box: (176.6, 307.2, 708.1, 609.0) + Size: 531.5x301.7 + diff --git a/output/sample_detected.jpg b/output/sample_detected.jpg new file mode 100644 index 0000000..24c3281 Binary files /dev/null and b/output/sample_detected.jpg differ diff --git a/output/sample_results.txt b/output/sample_results.txt new file mode 100644 index 0000000..3228df7 --- /dev/null +++ b/output/sample_results.txt @@ -0,0 +1,17 @@ +Model: car_frontal_detection_v1.pt +Image: sample.jpg +Confidence threshold: 0.3 +Detections: 2 + +Detection 1: + Class: Frontal + Confidence: 0.555 + Bounding box: (175.9, 279.7, 527.6, 500.9) + Size: 351.7x221.2 + +Detection 2: + Class: Car + Confidence: 0.418 + Bounding box: (167.7, 196.7, 881.4, 532.7) + Size: 713.8x336.0 + diff --git a/output/test_image_detected.jpg b/output/test_image_detected.jpg new file mode 100644 index 0000000..3ce5925 Binary files /dev/null and b/output/test_image_detected.jpg differ diff --git a/output/test_image_results.txt b/output/test_image_results.txt new file mode 100644 index 0000000..72db288 --- /dev/null +++ b/output/test_image_results.txt @@ -0,0 +1,5 @@ +Model: car_frontal_detection_v1.pt +Image: test_image.jpg +Confidence threshold: 0.3 +Detections: 0 + diff --git a/sample.jpg b/sample.jpg new file mode 100644 index 0000000..6ae641c Binary files /dev/null and b/sample.jpg differ diff --git a/sample2.png b/sample2.png new file mode 100644 index 0000000..6415a7e Binary files /dev/null and b/sample2.png differ diff --git a/siwatsystem/pympta.py b/siwatsystem/pympta.py index fd1485d..53d97d9 100644 --- a/siwatsystem/pympta.py +++ b/siwatsystem/pympta.py @@ -117,6 +117,21 @@ def load_pipeline_node(node_config: dict, mpta_dir: str, redis_client, db_manage return node def load_pipeline_from_zip(zip_source: str, target_dir: str) -> dict: + # Restrict to models directory for security + if not zip_source.startswith('models/'): + zip_source = os.path.join('models', zip_source) + + # Validate the path is within models directory (prevent path traversal) + try: + abs_zip_path = os.path.abspath(zip_source) + abs_models_path = os.path.abspath('models') + if not abs_zip_path.startswith(abs_models_path): + logger.error(f"Security violation: {zip_source} is outside models directory") + return None + except Exception as e: + logger.error(f"Error validating path {zip_source}: {str(e)}") + return None + logger.info(f"Attempting to load pipeline from {zip_source} to {target_dir}") os.makedirs(target_dir, exist_ok=True) zip_path = os.path.join(target_dir, "pipeline.mpta") @@ -514,65 +529,6 @@ def resolve_field_mapping(value_template, branch_results, action_context): logger.error(f"Error resolving field mapping '{value_template}': {e}") return None -def validate_pipeline_execution(node, regions_dict): - """ - Pre-validate that all required branches will execute successfully before - committing to Redis actions and 
database records. - - Returns: - - (True, []) if pipeline can execute completely - - (False, missing_branches) if some required branches won't execute - """ - # Get all branches that parallel actions are waiting for - required_branches = set() - - for action in node.get("parallelActions", []): - if action.get("type") == "postgresql_update_combined": - wait_for_branches = action.get("waitForBranches", []) - required_branches.update(wait_for_branches) - - if not required_branches: - # No parallel actions requiring specific branches - logger.debug("No parallel actions with waitForBranches - validation passes") - return True, [] - - logger.debug(f"Pre-validation: checking if required branches {list(required_branches)} will execute") - - # Check each required branch - missing_branches = [] - - for branch in node.get("branches", []): - branch_id = branch["modelId"] - - if branch_id not in required_branches: - continue # This branch is not required by parallel actions - - # Check if this branch would be triggered - trigger_classes = branch.get("triggerClasses", []) - min_conf = branch.get("minConfidence", 0) - - branch_triggered = False - for det_class in regions_dict: - det_confidence = regions_dict[det_class]["confidence"] - - if (det_class in trigger_classes and det_confidence >= min_conf): - branch_triggered = True - logger.debug(f"Pre-validation: branch {branch_id} WILL be triggered by {det_class} (conf={det_confidence:.3f} >= {min_conf})") - break - - if not branch_triggered: - missing_branches.append(branch_id) - logger.warning(f"Pre-validation: branch {branch_id} will NOT be triggered - no matching classes or insufficient confidence") - logger.debug(f" Required: {trigger_classes} with min_conf={min_conf}") - logger.debug(f" Available: {[(cls, regions_dict[cls]['confidence']) for cls in regions_dict]}") - - if missing_branches: - logger.error(f"Pipeline pre-validation FAILED: required branches {missing_branches} will not execute") - return False, missing_branches - else: - logger.info(f"Pipeline pre-validation PASSED: all required branches {list(required_branches)} will execute") - return True, [] - def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): """ Enhanced pipeline that supports: @@ -705,14 +661,6 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): else: logger.debug("No multi-class validation - proceeding with all detections") - # ─── Pre-validate pipeline execution ──────────────────────── - pipeline_valid, missing_branches = validate_pipeline_execution(node, regions_dict) - - if not pipeline_valid: - logger.error(f"Pipeline execution validation FAILED - required branches {missing_branches} cannot execute") - logger.error("Aborting pipeline: no Redis actions or database records will be created") - return (None, None) if return_bbox else None - # ─── Execute actions with region information ──────────────── detection_result = { "detections": all_detections, @@ -853,11 +801,9 @@ def run_pipeline(frame, node: dict, return_bbox: bool=False, context=None): primary_detection = max(all_detections, key=lambda x: x["confidence"]) primary_bbox = primary_detection["bbox"] - # Add branch results and session_id to primary detection for compatibility + # Add branch results to primary detection for compatibility if "branch_results" in detection_result: primary_detection["branch_results"] = detection_result["branch_results"] - if "session_id" in detection_result: - primary_detection["session_id"] = detection_result["session_id"] return 
(primary_detection, primary_bbox) if return_bbox else primary_detection diff --git a/test.py b/test.py new file mode 100644 index 0000000..ca34f01 --- /dev/null +++ b/test.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 +""" +Test script for car_frontal_detection_v1.pt model +Usage: python test.py --image <image_path> [--confidence <threshold>] [--save-output] +""" +# python test.py --image sample.jpg --confidence 0.6 --save-output + +import argparse +import cv2 +import torch +import numpy as np +from pathlib import Path +import sys + +def load_model_direct(model_path): + """Load model directly with torch.load to handle version compatibility""" + try: + # Try to load with weights_only=False for compatibility + checkpoint = torch.load(model_path, map_location='cpu', weights_only=False) + print(f"Model checkpoint keys: {list(checkpoint.keys())}") + + # Try to get model info + if 'model' in checkpoint: + model_info = checkpoint.get('model', {}) + print(f"Model info available: {hasattr(model_info, 'names') if hasattr(model_info, 'names') else 'No names found'}") + + return checkpoint + except Exception as e: + print(f"Direct torch.load failed: {e}") + return None + +def main(): + parser = argparse.ArgumentParser(description='Test car frontal detection model') + parser.add_argument('--image', required=True, help='Path to input image') + parser.add_argument('--model', default='car_frontal_detection_v1.pt', help='Path to model file') + parser.add_argument('--confidence', type=float, default=0.5, help='Confidence threshold (default: 0.5)') + parser.add_argument('--save-output', action='store_true', help='Save output image with detections') + parser.add_argument('--output-dir', default='output', help='Output directory for results') + parser.add_argument('--use-yolo', action='store_true', default=True, help='Use YOLO loading (default: True)') + + args = parser.parse_args() + + # Check if model file exists + if not Path(args.model).exists(): + print(f"Error: Model file '{args.model}' not found") + sys.exit(1) + + # Check if image file exists + if not Path(args.image).exists(): + print(f"Error: Image file '{args.image}' not found") + sys.exit(1) + + print(f"Loading model: {args.model}") + + model = None + if args.use_yolo: + try: + from ultralytics import YOLO + model = YOLO(args.model) + print(f"Model loaded successfully with YOLO") + print(f"Model classes: {model.names}") + except Exception as e: + print(f"Error loading model with YOLO: {e}") + print("Falling back to direct loading...") + + if model is None: + # Try direct loading for inspection + checkpoint = load_model_direct(args.model) + if checkpoint is None: + print("Failed to load model with any method") + sys.exit(1) + + print("Model loaded directly - this is for inspection only") + print("Available keys in checkpoint:", list(checkpoint.keys())) + + # Try to get model information + if 'model' in checkpoint: + model_obj = checkpoint['model'] + print(f"Model object type: {type(model_obj)}") + if hasattr(model_obj, 'names'): + print(f"Model classes: {model_obj.names}") + if hasattr(model_obj, 'yaml'): + print(f"Model YAML config available: {bool(model_obj.yaml)}") + + print("\nTo run inference, you need a compatible Ultralytics version.") + print("Consider upgrading ultralytics: pip install ultralytics --upgrade") + return + + print(f"Loading image: {args.image}") + try: + image = cv2.imread(args.image) + if image is None: + raise ValueError("Could not load image") + print(f"Image shape: {image.shape}") + except Exception as e: + print(f"Error loading image: {e}") +
sys.exit(1) + + print(f"Running inference with confidence threshold: {args.confidence}") + try: + results = model(image, conf=args.confidence) + + if len(results) > 0 and len(results[0].boxes) > 0: + print(f"Detections found: {len(results[0].boxes)}") + + # Print detection details + for i, box in enumerate(results[0].boxes): + x1, y1, x2, y2 = box.xyxy[0].cpu().numpy() + conf = box.conf[0].cpu().numpy() + cls = int(box.cls[0].cpu().numpy()) + class_name = model.names[cls] if cls in model.names else f"Class_{cls}" + + print(f"Detection {i+1}:") + print(f" Class: {class_name}") + print(f" Confidence: {conf:.3f}") + print(f" Bounding box: ({x1:.1f}, {y1:.1f}, {x2:.1f}, {y2:.1f})") + print(f" Size: {x2-x1:.1f}x{y2-y1:.1f}") + else: + print("No detections found") + + if args.save_output: + output_dir = Path(args.output_dir) + output_dir.mkdir(exist_ok=True) + + # Draw detections on image + annotated_image = results[0].plot() + + # Save annotated image + input_path = Path(args.image) + output_path = output_dir / f"{input_path.stem}_detected{input_path.suffix}" + cv2.imwrite(str(output_path), annotated_image) + print(f"Output saved to: {output_path}") + + # Also save results as text + results_path = output_dir / f"{input_path.stem}_results.txt" + with open(results_path, 'w') as f: + f.write(f"Model: {args.model}\n") + f.write(f"Image: {args.image}\n") + f.write(f"Confidence threshold: {args.confidence}\n") + f.write(f"Detections: {len(results[0].boxes) if len(results) > 0 else 0}\n\n") + + if len(results) > 0 and len(results[0].boxes) > 0: + for i, box in enumerate(results[0].boxes): + x1, y1, x2, y2 = box.xyxy[0].cpu().numpy() + conf = box.conf[0].cpu().numpy() + cls = int(box.cls[0].cpu().numpy()) + class_name = model.names[cls] if cls in model.names else f"Class_{cls}" + + f.write(f"Detection {i+1}:\n") + f.write(f" Class: {class_name}\n") + f.write(f" Confidence: {conf:.3f}\n") + f.write(f" Bounding box: ({x1:.1f}, {y1:.1f}, {x2:.1f}, {y2:.1f})\n") + f.write(f" Size: {x2-x1:.1f}x{y2-y1:.1f}\n\n") + + print(f"Results saved to: {results_path}") + + except Exception as e: + print(f"Error during inference: {e}") + sys.exit(1) + + print("Test completed successfully!") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/worker.md b/worker.md index c485db5..302c8ce 100644 --- a/worker.md +++ b/worker.md @@ -2,6 +2,12 @@ This document outlines the WebSocket-based communication protocol between the CMS backend and a detector worker. As a worker developer, your primary responsibility is to implement a WebSocket server that adheres to this protocol. +The current Python Detector Worker implementation supports advanced computer vision pipelines with: +- Multi-class YOLO detection with parallel processing +- PostgreSQL database integration with automatic schema management +- Redis integration for image storage and pub/sub messaging +- Hierarchical pipeline execution with detection → classification branching + ## 1. Connection The worker must run a WebSocket server, preferably on port `8000`. The backend system, which is managed by a container orchestration service, will automatically discover and establish a WebSocket connection to your worker. @@ -25,14 +31,34 @@ To enable modularity and dynamic configuration, the backend will send you a URL 2. Extracting its contents. 3. Interpreting the contents to configure its internal pipeline. -**The contents of the `.mpta` file are entirely up to the user who configures the model in the CMS.** This allows for maximum flexibility. 
For example, the archive could contain: +**The current implementation supports comprehensive pipeline configurations including:** -- AI/ML Models: Pre-trained models for libraries like TensorFlow, PyTorch, or ONNX. -- Configuration Files: A `config.json` or `pipeline.yaml` that defines a sequence of operations, specifies model paths, or sets detection thresholds. -- Scripts: Custom Python scripts for pre-processing or post-processing. -- API Integration Details: A JSON file with endpoint information and credentials for interacting with third-party detection services. +- **AI/ML Models**: YOLO models (.pt files) for detection and classification +- **Pipeline Configuration**: `pipeline.json` defining hierarchical detection→classification workflows +- **Multi-class Detection**: Simultaneous detection of multiple object classes (e.g., Car + Frontal) +- **Parallel Processing**: Concurrent execution of classification branches with ThreadPoolExecutor +- **Database Integration**: PostgreSQL configuration for automatic table creation and updates +- **Redis Actions**: Image storage with region cropping and pub/sub messaging +- **Dynamic Field Mapping**: Template-based field resolution for database operations -Essentially, the `.mpta` file is a self-contained package that tells your worker _how_ to process the video stream for a given subscription. +**Enhanced MPTA Structure:** +``` +pipeline.mpta/ +├── pipeline.json # Main configuration with redis/postgresql settings +├── car_detection.pt # Primary YOLO detection model +├── brand_classifier.pt # Classification model for car brands +├── bodytype_classifier.pt # Classification model for body types +└── ... +``` + +The `pipeline.json` now supports advanced features like: +- Multi-class detection with `expectedClasses` validation +- Parallel branch processing with `parallel: true` +- Database actions with `postgresql_update_combined` +- Redis actions with region-specific image cropping +- Branch synchronization with `waitForBranches` + +Essentially, the `.mpta` file is a self-contained package that tells your worker *how* to process the video stream for a given subscription, including complex multi-stage AI pipelines with database persistence. ## 4. Messages from Worker to Backend @@ -79,6 +105,15 @@ Sent when the worker detects a relevant object. The `detection` object should be - **Type:** `imageDetection` +**Enhanced Detection Capabilities:** + +The current implementation supports multi-class detection with parallel classification processing. When a vehicle is detected, the system: + +1. **Multi-Class Detection**: Simultaneously detects "Car" and "Frontal" classes +2. **Parallel Processing**: Runs brand and body type classification concurrently +3. **Database Integration**: Automatically creates and updates PostgreSQL records +4. **Redis Storage**: Saves cropped frontal images with expiration + **Payload Example:** ```json @@ -88,19 +123,38 @@ Sent when the worker detects a relevant object. 
The `detection` object should be "timestamp": "2025-07-14T12:34:56.789Z", "data": { "detection": { - "carModel": "Civic", + "class": "Car", + "confidence": 0.92, "carBrand": "Honda", - "carYear": 2023, + "carModel": "Civic", "bodyType": "Sedan", - "licensePlateText": "ABCD1234", - "licensePlateConfidence": 0.95 + "branch_results": { + "car_brand_cls_v1": { + "class": "Honda", + "confidence": 0.89, + "brand": "Honda" + }, + "car_bodytype_cls_v1": { + "class": "Sedan", + "confidence": 0.85, + "body_type": "Sedan" + } + } }, "modelId": 101, - "modelName": "US-LPR-and-Vehicle-ID" + "modelName": "Car Frontal Detection V1" } } ``` +**Database Integration:** + +Each detection automatically: +- Creates a record in `gas_station_1.car_frontal_info` table +- Generates a unique `session_id` for tracking +- Updates the record with classification results after parallel processing completes +- Stores cropped frontal images in Redis with the session_id as key + ### 4.3. Patch Session > **Note:** Patch messages are only used when the worker can't keep up and needs to retroactively send detections. Normally, detections should be sent in real-time using `imageDetection` messages. Use `patchSession` only to update session data after the fact. @@ -117,9 +171,9 @@ Allows the worker to request a modification to an active session's data. The `da "sessionId": 12345, "data": { "currentCar": { - "carModel": "Civic", - "carBrand": "Honda", - "licensePlateText": "ABCD1234" + "carModel": "Civic", + "carBrand": "Honda", + "licensePlateText": "ABCD1234" } } } @@ -133,33 +187,24 @@ The `data` object in the `patchSession` message is merged with the existing `Dis ```typescript interface DisplayPersistentData { - progressionStage: - | 'welcome' - | 'car_fueling' - | 'car_waitpayment' - | 'car_postpayment' - | null; - qrCode: string | null; - adsPlayback: { - playlistSlotOrder: number; // The 'order' of the current slot - adsId: number | null; - adsUrl: string | null; - } | null; - currentCar: { - carModel?: string; - carBrand?: string; - carYear?: number; - bodyType?: string; - licensePlateText?: string; - licensePlateType?: string; - } | null; - fuelPump: { - /* FuelPumpData structure */ - } | null; - weatherData: { - /* WeatherResponse structure */ - } | null; - sessionId: number | null; + progressionStage: "welcome" | "car_fueling" | "car_waitpayment" | "car_postpayment" | null; + qrCode: string | null; + adsPlayback: { + playlistSlotOrder: number; // The 'order' of the current slot + adsId: number | null; + adsUrl: string | null; + } | null; + currentCar: { + carModel?: string; + carBrand?: string; + carYear?: number; + bodyType?: string; + licensePlateText?: string; + licensePlateType?: string; + } | null; + fuelPump: { /* FuelPumpData structure */ } | null; + weatherData: { /* WeatherResponse structure */ } | null; + sessionId: number | null; } ``` @@ -212,7 +257,7 @@ Instructs the worker to process a camera's RTSP stream using the configuration f > - Capture each snapshot only once per cycle, and reuse it for all display subscriptions sharing that camera. > - Capture each frame/image only once per cycle. > - Reuse the same captured image and snapshot for all display subscriptions that share the camera, processing and routing detection results separately for each display as needed. -> This avoids unnecessary load and bandwidth usage, and ensures consistent detection results and snapshots across all displays sharing the same camera. 
+> This avoids unnecessary load and bandwidth usage, and ensures consistent detection results and snapshots across all displays sharing the same camera. ### 5.2. Unsubscribe from Camera @@ -324,7 +369,7 @@ This section shows a typical sequence of messages between the backend and the wo > **Note:** Unsubscribe is triggered when a user removes a camera or when the node is too heavily loaded and needs rebalancing. 1. **Connection Established** & **Heartbeat** - - **Worker -> Backend** + * **Worker -> Backend** ```json { "type": "stateReport", @@ -336,7 +381,7 @@ This section shows a typical sequence of messages between the backend and the wo } ``` 2. **Backend Subscribes Camera** - - **Backend -> Worker** + * **Backend -> Worker** ```json { "type": "subscribe", @@ -350,7 +395,7 @@ This section shows a typical sequence of messages between the backend and the wo } ``` 3. **Worker Acknowledges in Heartbeat** - - **Worker -> Backend** + * **Worker -> Backend** ```json { "type": "stateReport", @@ -369,7 +414,7 @@ This section shows a typical sequence of messages between the backend and the wo } ``` 4. **Worker Detects a Car** - - **Worker -> Backend** + * **Worker -> Backend** ```json { "type": "imageDetection", @@ -388,7 +433,7 @@ This section shows a typical sequence of messages between the backend and the wo } } ``` - - **Worker -> Backend** + * **Worker -> Backend** ```json { "type": "imageDetection", @@ -407,7 +452,7 @@ This section shows a typical sequence of messages between the backend and the wo } } ``` - - **Worker -> Backend** + * **Worker -> Backend** ```json { "type": "imageDetection", @@ -427,7 +472,7 @@ This section shows a typical sequence of messages between the backend and the wo } ``` 5. **Backend Unsubscribes Camera** - - **Backend -> Worker** + * **Backend -> Worker** ```json { "type": "unsubscribe", @@ -437,7 +482,7 @@ This section shows a typical sequence of messages between the backend and the wo } ``` 6. **Worker Acknowledges Unsubscription** - - **Worker -> Backend** + * **Worker -> Backend** ```json { "type": "stateReport", @@ -448,7 +493,6 @@ This section shows a typical sequence of messages between the backend and the wo "cameraConnections": [] } ``` - ## 7. HTTP API: Image Retrieval In addition to the WebSocket protocol, the worker exposes an HTTP endpoint for retrieving the latest image frame from a camera. @@ -464,13 +508,11 @@ GET /camera/{camera_id}/image ### Response - **Success (200):** Returns the latest JPEG image from the camera stream. - - - `Content-Type: image/jpeg` - - Binary JPEG data. + - `Content-Type: image/jpeg` + - Binary JPEG data. - **Error (404):** If the camera is not found or no frame is available. - - - JSON error response. + - JSON error response. - **Error (500):** Internal server error. @@ -483,9 +525,9 @@ GET /camera/display-001;cam-001/image ### Example Response - **Headers:** - ``` - Content-Type: image/jpeg - ``` + ``` + Content-Type: image/jpeg + ``` - **Body:** Binary JPEG image. ### Notes
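As an illustrative note rather than part of the protocol definition above, a backend or debugging client could fetch the latest frame from this endpoint roughly as sketched below. The worker address is an assumption based on the suggested default port from section 1, and the camera ID is the example subscription identifier used in the request example above.

```python
import requests

# Assumed local worker address; port 8000 is only the suggested default.
WORKER_BASE_URL = "http://localhost:8000"
# Example camera ID from the request example above (display and camera identifiers joined by ';').
camera_id = "display-001;cam-001"

response = requests.get(f"{WORKER_BASE_URL}/camera/{camera_id}/image", timeout=5)
if response.status_code == 200:
    # Success: the body is binary JPEG data.
    with open("latest_frame.jpg", "wb") as f:
        f.write(response.content)
elif response.status_code == 404:
    # Camera not found or no frame available yet; the body is a JSON error response.
    print(response.json())
else:
    # 500 or another unexpected status.
    print(f"Unexpected status: {response.status_code}")
```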
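Separately, for the `pipeline.json` capabilities described in the `.mpta` section earlier, the sketch below shows one hypothetical minimal configuration. The field names `expectedClasses`, `triggerClasses`, `minConfidence`, `parallel`, `parallelActions`, `waitForBranches`, and `postgresql_update_combined` come from this document and the worker code; the `modelFile` key, the exact nesting, and the threshold values are assumptions, and the Redis/PostgreSQL connection settings referenced above are omitted.

```json
{
  "modelId": "car_frontal_detection_v1",
  "modelFile": "car_detection.pt",
  "expectedClasses": ["Car", "Frontal"],
  "branches": [
    {
      "modelId": "car_brand_cls_v1",
      "modelFile": "brand_classifier.pt",
      "parallel": true,
      "triggerClasses": ["Frontal"],
      "minConfidence": 0.5
    },
    {
      "modelId": "car_bodytype_cls_v1",
      "modelFile": "bodytype_classifier.pt",
      "parallel": true,
      "triggerClasses": ["Frontal"],
      "minConfidence": 0.5
    }
  ],
  "parallelActions": [
    {
      "type": "postgresql_update_combined",
      "waitForBranches": ["car_brand_cls_v1", "car_bodytype_cls_v1"]
    }
  ]
}
```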