"""
|
|
Real-time object tracking with event-driven batching architecture.
|
|
|
|
This script demonstrates:
|
|
- Event-driven stream processing with StreamConnectionManager
|
|
- Batched GPU inference with ModelController
|
|
- Ping-pong buffer architecture for optimal throughput
|
|
- Async/await pattern for multiple RTSP streams
|
|
- Automatic PT to TensorRT conversion
|
|
"""
|
|
|
|
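
# Usage note (derived from the __main__ block at the bottom of this file):
# run the script with no arguments for the multi-stream demo, which reads
# CAMERA_URL_1..N from .env, or pass "single" to run the single-stream demo
# against CAMERA_URL_1.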

import asyncio
import time
import os

import torch
from dotenv import load_dotenv

from services import (
    StreamConnectionManager,
    YOLOv8Utils,
    COCO_CLASSES,
)

# Load environment variables
load_dotenv()
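
# The demos below read RTSP endpoints from environment variables named
# CAMERA_URL_1, CAMERA_URL_2, ... (loaded from .env above). Example entries
# (placeholder addresses, not real endpoints):
#   CAMERA_URL_1=rtsp://user:pass@192.168.1.10:554/stream1
#   CAMERA_URL_2=rtsp://user:pass@192.168.1.11:554/stream1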


async def main_single_stream():
    """Single stream example with event-driven architecture."""
    print("=" * 80)
    print("Event-Driven GPU-Accelerated Object Tracking - Single Stream")
    print("=" * 80)

    # Configuration
    GPU_ID = 0
    MODEL_PATH = "models/yolov8n.pt"  # PT file will be auto-converted
    STREAM_URL = os.getenv('CAMERA_URL_1', 'rtsp://localhost:8554/test')
    BATCH_SIZE = 4
    FORCE_TIMEOUT = 0.05
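
    # How these knobs relate to the batching described in the module docstring
    # (a hedged reading based on the parameter names): frames accumulate in a
    # buffer until BATCH_SIZE frames are ready or FORCE_TIMEOUT seconds pass,
    # whichever comes first; the filled buffer is handed to the GPU while the
    # second ("ping-pong") buffer keeps collecting incoming frames.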
print(f"\nConfiguration:")
|
|
print(f" GPU: {GPU_ID}")
|
|
print(f" Model: {MODEL_PATH}")
|
|
print(f" Stream: {STREAM_URL}")
|
|
print(f" Batch size: {BATCH_SIZE}")
|
|
print(f" Force timeout: {FORCE_TIMEOUT}s\n")
|
|
|
|
# Create StreamConnectionManager with PT conversion enabled
|
|
print("[1/3] Creating StreamConnectionManager...")
|
|
manager = StreamConnectionManager(
|
|
gpu_id=GPU_ID,
|
|
batch_size=BATCH_SIZE,
|
|
force_timeout=FORCE_TIMEOUT,
|
|
enable_pt_conversion=True # Enable PT conversion
|
|
)
|
|
print("✓ Manager created")
|
|
|
|
# Initialize with PT model (auto-conversion)
|
|
print("\n[2/3] Initializing with PT model...")
|
|
print("Note: First load will convert PT to TensorRT (3-5 minutes)")
|
|
print("Subsequent loads will use cached TensorRT engine\n")
|
|
|
|

    try:
        await manager.initialize(
            model_path=MODEL_PATH,
            model_id="detector",
            preprocess_fn=YOLOv8Utils.preprocess,
            postprocess_fn=YOLOv8Utils.postprocess,
            num_contexts=4,
            pt_input_shapes={"images": (1, 3, 640, 640)},
            pt_precision=torch.float16
        )
        print("✓ Manager initialized (PT converted to TensorRT)")
    except Exception as e:
        print(f"✗ Failed to initialize: {e}")
        import traceback
        traceback.print_exc()
        return
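
    # A note on the arguments above (hedged; the exact contracts live in the
    # services package): preprocess_fn/postprocess_fn are the YOLOv8 helpers
    # that adapt frames to the model input and raw model output to detections,
    # pt_input_shapes/pt_precision describe the input used for the PT-to-TensorRT
    # conversion (1x3x640x640, FP16 here), and num_contexts presumably sets how
    # many execution contexts are available for concurrent inference.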

    # Connect stream
    print("\n[3/3] Connecting to stream...")
    try:
        connection = await manager.connect_stream(
            rtsp_url=STREAM_URL,
            stream_id="camera_1",
            buffer_size=30
        )
        print("✓ Stream connected: camera_1")
    except Exception as e:
        print(f"✗ Failed to connect stream: {e}")
        return

    print(f"\n{'=' * 80}")
    print("Event-driven tracking is running!")
    print("Press Ctrl+C to stop")
    print(f"{'=' * 80}\n")

    # Stream results
    result_count = 0
    start_time = time.time()

    try:
        async for result in connection.tracking_results():
            result_count += 1

            # Print stats every 30 results
            if result_count % 30 == 0:
                elapsed = time.time() - start_time
                fps = result_count / elapsed if elapsed > 0 else 0

                print(f"\nResults: {result_count} | FPS: {fps:.1f}")
                print(f" Stream: {result.stream_id}")
                print(f" Objects: {len(result.tracked_objects)}")

                if result.tracked_objects:
                    class_counts = {}
                    for obj in result.tracked_objects:
                        class_counts[obj.class_name] = class_counts.get(obj.class_name, 0) + 1
                    print(f" Classes: {class_counts}")

    except (KeyboardInterrupt, asyncio.CancelledError):
        # Under asyncio.run(), Ctrl+C usually reaches this coroutine as a task
        # cancellation, so catch both to make sure the cleanup below runs.
        print("\n✓ Interrupted by user")

    # Cleanup
    print(f"\n{'=' * 80}")
    print("Cleanup")
    print(f"{'=' * 80}")

    await connection.stop()
    await manager.shutdown()
    print("✓ Stopped")

    # Final stats
    elapsed = time.time() - start_time
    avg_fps = result_count / elapsed if elapsed > 0 else 0
    print(f"\nFinal: {result_count} results in {elapsed:.1f}s ({avg_fps:.1f} FPS)")


async def main_multi_stream():
    """Multi-stream example with batched inference."""
    print("=" * 80)
    print("Event-Driven GPU-Accelerated Object Tracking - Multi-Stream")
    print("=" * 80)

    # Configuration
    GPU_ID = 0
    MODEL_PATH = "models/yolov8n.pt"  # PT file will be auto-converted
    BATCH_SIZE = 16
    FORCE_TIMEOUT = 0.05

    # Load camera URLs
    camera_urls = []
    i = 1
    while True:
        url = os.getenv(f'CAMERA_URL_{i}')
        if url:
            camera_urls.append((f"camera_{i}", url))
            i += 1
        else:
            break

    if not camera_urls:
        print("No camera URLs found in .env")
        return
print(f"\nConfiguration:")
|
|
print(f" GPU: {GPU_ID}")
|
|
print(f" Model: {MODEL_PATH}")
|
|
print(f" Streams: {len(camera_urls)}")
|
|
print(f" Batch size: {BATCH_SIZE}\n")
|
|
|
|
# Create manager with PT conversion
|
|
print("[1/3] Creating StreamConnectionManager...")
|
|
manager = StreamConnectionManager(
|
|
gpu_id=GPU_ID,
|
|
batch_size=BATCH_SIZE,
|
|
force_timeout=FORCE_TIMEOUT,
|
|
enable_pt_conversion=True
|
|
)
|
|
print("✓ Manager created")
|
|
|
|

    # Initialize with PT model
    print("\n[2/3] Initializing with PT model...")
    try:
        await manager.initialize(
            model_path=MODEL_PATH,
            model_id="detector",
            preprocess_fn=YOLOv8Utils.preprocess,
            postprocess_fn=YOLOv8Utils.postprocess,
            num_contexts=8,
            pt_input_shapes={"images": (1, 3, 640, 640)},
            pt_precision=torch.float16
        )
        print("✓ Manager initialized")
    except Exception as e:
        print(f"✗ Failed to initialize: {e}")
        import traceback
        traceback.print_exc()
        return

    # Connect all streams
    print(f"\n[3/3] Connecting {len(camera_urls)} streams...")
    connections = {}
    for stream_id, rtsp_url in camera_urls:
        try:
            conn = await manager.connect_stream(
                rtsp_url=rtsp_url,
                stream_id=stream_id,
                buffer_size=30
            )
            connections[stream_id] = conn
            print(f"✓ Connected: {stream_id}")
        except Exception as e:
            print(f"✗ Failed {stream_id}: {e}")

    if not connections:
        print("No streams connected")
        return
print(f"\n{'=' * 80}")
|
|
print(f"Multi-stream tracking running ({len(connections)} streams)")
|
|
print("Frames from all streams are batched together!")
|
|
print("Press Ctrl+C to stop")
|
|
print(f"{'=' * 80}\n")
|
|
|
|
# Track stats
|
|
stream_stats = {sid: {'count': 0, 'start': time.time()} for sid in connections.keys()}
|
|
total_results = 0
|
|
start_time = time.time()
|
|
|
|

    try:
        # Simple approach: as written, this only drains results from the first
        # connection -- each tracking_results() iterator runs until its stream
        # stops, so the outer loop never advances past it. In production you
        # would merge all result streams; a hedged sketch of such a merge
        # (merge_tracking_results) follows this function.
        for conn in connections.values():
            async for result in conn.tracking_results():
                total_results += 1
                stream_id = result.stream_id

                if stream_id in stream_stats:
                    stream_stats[stream_id]['count'] += 1

                # Print stats every 100 results
                if total_results % 100 == 0:
                    elapsed = time.time() - start_time
                    total_fps = total_results / elapsed if elapsed > 0 else 0

                    print(f"\nTotal: {total_results} | {elapsed:.1f}s | {total_fps:.1f} FPS")
                    for sid, stats in stream_stats.items():
                        s_elapsed = time.time() - stats['start']
                        s_fps = stats['count'] / s_elapsed if s_elapsed > 0 else 0
                        print(f" {sid}: {stats['count']} ({s_fps:.1f} FPS)")

    except (KeyboardInterrupt, asyncio.CancelledError):
        # As in main_single_stream, Ctrl+C typically arrives as a task
        # cancellation under asyncio.run(); catch both so cleanup still runs.
        print("\n✓ Interrupted")

    # Cleanup
    print(f"\n{'=' * 80}")
    print("Cleanup")
    print(f"{'=' * 80}")

    for conn in connections.values():
        await conn.stop()
    await manager.shutdown()
    print("✓ Stopped")

    # Final stats
    elapsed = time.time() - start_time
    avg_fps = total_results / elapsed if elapsed > 0 else 0
    print(f"\nFinal: {total_results} results in {elapsed:.1f}s ({avg_fps:.1f} FPS)")
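

# The multi-stream loop above intentionally keeps things simple and, as noted,
# only drains the first connection's results. Below is a minimal, hedged sketch
# of how all result streams could be merged into a single async iterator using
# plain asyncio primitives. It assumes only that each connection exposes the
# same tracking_results() async generator used above; it is an illustration,
# not part of the services API.
async def merge_tracking_results(connections):
    """Yield tracking results from all connections as they arrive (sketch)."""
    queue = asyncio.Queue()
    done = object()  # sentinel marking one exhausted result stream

    async def pump(conn):
        # Forward one connection's results into the shared queue.
        try:
            async for result in conn.tracking_results():
                await queue.put(result)
        finally:
            await queue.put(done)

    tasks = [asyncio.create_task(pump(conn)) for conn in connections.values()]
    active = len(tasks)
    try:
        while active:
            item = await queue.get()
            if item is done:
                active -= 1
                continue
            yield item
    finally:
        for task in tasks:
            task.cancel()

# Example swap-in for the loop in main_multi_stream:
#   async for result in merge_tracking_results(connections):
#       ...handle result as before...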


if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1 and sys.argv[1] == "single":
        asyncio.run(main_single_stream())
    else:
        asyncio.run(main_multi_stream())