fix: gpu memory leaks
parent 3a47920186
commit 593611cdb7
13 changed files with 420 additions and 166 deletions
@@ -272,12 +272,14 @@ class ObjectTracker:
         for tid in stale_track_ids:
             del self._tracks[tid]
 
-    def update(self, detections: List[Detection]) -> List[TrackedObject]:
+    def update(self, detections: List[Detection], frame_shape: tuple = None, model_input_size: int = 640) -> List[TrackedObject]:
         """
         Update tracker with new detections (decoupled from inference).
 
         Args:
             detections: List of Detection objects from model inference
+            frame_shape: Original frame shape (C, H, W) for scaling bboxes back from model space
+            model_input_size: Model input size (default: 640 for YOLOv8)
 
         Returns:
             List of currently tracked objects
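A minimal caller-side sketch of the new signature, assuming `ObjectTracker` takes no constructor arguments and that `Detection` exposes the bbox/confidence/class_id fields the diff reads; the sample values are invented for illustration:

from dataclasses import dataclass
from typing import List

@dataclass
class Detection:  # hypothetical stand-in for the project's Detection class
    bbox: List[float]        # [x1, y1, x2, y2] in 640x640 model space
    confidence: float
    class_id: int

tracker = ObjectTracker()    # assumed default constructor
detections = [Detection([64.0, 128.0, 320.0, 480.0], 0.92, 0)]

# frame_shape is the original frame's (C, H, W); the tracker uses it to
# scale model-space boxes back to frame space before matching tracks.
tracked = tracker.update(
    detections,
    frame_shape=(3, 1080, 1920),
    model_input_size=640,
)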
@@ -291,6 +293,22 @@ class ObjectTracker:
             self._cleanup_stale_tracks()
             return list(self._tracks.values())
 
+        # Scale detections from model space (640x640) to frame space (H x W)
+        if frame_shape is not None:
+            _, frame_h, frame_w = frame_shape
+            scale_x = frame_w / model_input_size
+            scale_y = frame_h / model_input_size
+
+            # Scale all detection bboxes
+            for det in detections:
+                x1, y1, x2, y2 = det.bbox
+                det.bbox = [
+                    x1 * scale_x,
+                    y1 * scale_y,
+                    x2 * scale_x,
+                    y2 * scale_y
+                ]
+
         # Convert detections to tensor for GPU processing
         det_tensor = torch.tensor(
             [[*det.bbox, det.confidence, det.class_id] for det in detections],
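For reference, the scaling step in isolation with concrete numbers (the 1920x1080 frame and the sample box are invented for the example):

# Standalone illustration of mapping a box from 640x640 model space back
# to the original frame; values are made up, not taken from the commit.
model_input_size = 640
frame_h, frame_w = 1080, 1920               # original frame (H, W)

scale_x = frame_w / model_input_size        # 3.0
scale_y = frame_h / model_input_size        # 1.6875

x1, y1, x2, y2 = 64.0, 128.0, 320.0, 480.0  # box in model space
frame_bbox = [x1 * scale_x, y1 * scale_y, x2 * scale_x, y2 * scale_y]
print(frame_bbox)                           # [192.0, 216.0, 960.0, 810.0]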
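The hunk is truncated at the torch.tensor(...) call, so the dtype and device handling are not visible here; one common way to build such a detection tensor without retaining autograd state on the GPU is sketched below (the device choice, dtype, and inference_mode() guard are assumptions, not taken from this commit):

from dataclasses import dataclass
from typing import List

import torch

@dataclass
class Detection:  # hypothetical stand-in for the project's Detection class
    bbox: List[float]
    confidence: float
    class_id: int

detections = [Detection([100.0, 50.0, 300.0, 400.0], 0.9, 0)]

# Tensors built from plain Python numbers carry no autograd history, and
# inference_mode() keeps downstream ops from allocating gradient buffers.
device = "cuda" if torch.cuda.is_available() else "cpu"
with torch.inference_mode():
    det_tensor = torch.tensor(
        [[*det.bbox, det.confidence, det.class_id] for det in detections],
        dtype=torch.float32,
        device=device,
    )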