converter system
parent d3dbf9a580
commit 748fb71980
9 changed files with 1012 additions and 14 deletions
@@ -167,9 +167,11 @@ def main():
     """
     Main function for real-time tracking visualization.
     """
+    import torch
+
     # Configuration
     GPU_ID = 0
-    MODEL_PATH = "models/yolov8n.trt"
+    MODEL_PATH = "models/yolov8n.pt"  # Changed to PT file
     RTSP_URL = os.getenv('CAMERA_URL_1', 'rtsp://localhost:8554/test')
     BUFFER_SIZE = 30
     WINDOW_NAME = "Real-time Object Tracking"
@@ -178,18 +180,24 @@ def main():
     print("Real-time GPU-Accelerated Object Tracking")
     print("=" * 80)

-    # Step 1: Create model repository
+    # Step 1: Create model repository with PT conversion enabled
     print("\n[1/4] Initializing TensorRT Model Repository...")
-    model_repo = TensorRTModelRepository(gpu_id=GPU_ID, default_num_contexts=4)
+    model_repo = TensorRTModelRepository(gpu_id=GPU_ID, default_num_contexts=4, enable_pt_conversion=True)

-    # Load detection model
+    # Load detection model (will auto-convert PT to TRT)
     model_id = "yolov8_detector"
     if os.path.exists(MODEL_PATH):
         try:
+            print(f"Loading model from {MODEL_PATH}...")
+            print("Note: First load will convert PT to TensorRT (may take 3-5 minutes)")
+            print("Subsequent loads will use cached TensorRT engine")
+
             metadata = model_repo.load_model(
                 model_id=model_id,
                 file_path=MODEL_PATH,
-                num_contexts=4
+                num_contexts=4,
+                pt_input_shapes={"images": (1, 3, 640, 640)},  # Required for PT conversion
+                pt_precision=torch.float16  # Use FP16 for better performance
             )
             print(f"✓ Model loaded successfully")
             print(f"  Input shape: {metadata.input_shapes}")
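Note on the hunk above: the PT-to-TensorRT path is driven entirely by the new constructor flag and the extra `load_model` keywords. Below is a minimal standalone sketch of the same loading pattern, assuming only the API shown in this diff (`enable_pt_conversion`, `pt_input_shapes`, `pt_precision`); the import path, file path, and model ID are placeholders and not part of the commit.

# Minimal sketch of the PT-to-TensorRT loading path; see assumptions above.
import os
import torch
from tensorrt_model_repository import TensorRTModelRepository  # import path is an assumption

repo = TensorRTModelRepository(gpu_id=0, default_num_contexts=4, enable_pt_conversion=True)

model_path = "models/yolov8n.pt"  # placeholder path
if os.path.exists(model_path):
    metadata = repo.load_model(
        model_id="yolov8_detector",
        file_path=model_path,
        num_contexts=4,
        pt_input_shapes={"images": (1, 3, 640, 640)},  # static input shape needed for the engine build
        pt_precision=torch.float16,                     # FP16 engine
    )
    print(metadata.input_shapes)

Per the notes printed by the script, the first call converts the .pt weights and builds an engine, and later calls are expected to reuse the cached TensorRT engine.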
@@ -197,10 +205,12 @@ def main():
         except Exception as e:
             print(f"✗ Failed to load model: {e}")
             print(f"  Please ensure {MODEL_PATH} exists")
+            import traceback
+            traceback.print_exc()
             return
     else:
         print(f"✗ Model file not found: {MODEL_PATH}")
-        print(f"  Please provide a valid TensorRT model file")
+        print(f"  Please provide a valid PyTorch (.pt) or TensorRT (.trt) model file")
         return

     # Step 2: Create tracking controller
@@ -370,7 +380,7 @@ def main_multi_window():
     with separate OpenCV windows for each stream.
     """
     GPU_ID = 0
-    MODEL_PATH = "models/yolov8n.trt"
+    MODEL_PATH = "models/yolov8n.pt"

     # Load camera URLs from environment
     camera_urls = []
@@ -389,11 +399,23 @@ def main_multi_window():

     print(f"Starting multi-window tracking with {len(camera_urls)} cameras")

-    # Create shared model repository
-    model_repo = TensorRTModelRepository(gpu_id=GPU_ID, default_num_contexts=8)
+    # Create shared model repository with PT conversion enabled
+    import torch
+    model_repo = TensorRTModelRepository(gpu_id=GPU_ID, default_num_contexts=8, enable_pt_conversion=True)

     if os.path.exists(MODEL_PATH):
-        model_repo.load_model("detector", MODEL_PATH, num_contexts=8)
+        print(f"Loading model from {MODEL_PATH}...")
+        print("Note: First load will convert PT to TensorRT (may take 3-5 minutes)")
+        print("Subsequent loads will use cached TensorRT engine")
+
+        model_repo.load_model(
+            model_id="detector",
+            file_path=MODEL_PATH,
+            num_contexts=8,
+            pt_input_shapes={"images": (1, 3, 640, 640)},  # Required for PT conversion
+            pt_precision=torch.float16  # Use FP16 for better performance
+        )
+        print("✓ Model loaded successfully")
     else:
         print(f"Model not found: {MODEL_PATH}")
         return
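Note: the multi-window hunk assumes `camera_urls` has already been populated from environment variables; only the comment `# Load camera URLs from environment` appears in the surrounding context. Below is a minimal sketch of one way such a list could be built, assuming the `CAMERA_URL_1`, `CAMERA_URL_2`, ... naming used for `RTSP_URL` earlier in the file; the actual population logic lives outside the hunks shown here.

# Sketch only: collect CAMERA_URL_1, CAMERA_URL_2, ... until one is missing.
import os

camera_urls = []
index = 1
while True:
    url = os.getenv(f"CAMERA_URL_{index}")
    if url is None:
        break
    camera_urls.append(url)
    index += 1

if not camera_urls:
    # Fall back to the default test stream used for RTSP_URL in main().
    camera_urls.append("rtsp://localhost:8554/test")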