add StrongSORT Tracker
This commit is contained in:
parent ffc2e99678
commit b7d8b3266f

93 changed files with 20230 additions and 6 deletions

6 .gitignore (vendored)
@@ -2,7 +2,7 @@
 archive/
 Dockerfile
 
-/models
+# /models
 app.log
 *.pt
 .venv/
@@ -15,3 +15,7 @@ mptas
 detector_worker.log
 .gitignore
 no_frame_debug.log
+
+
+# Result from tracker
+feeder/runs/

4 debug/cuda.py (Normal file)
@@ -0,0 +1,4 @@
import torch
print(torch.cuda.is_available())  # True if CUDA is available
print(torch.cuda.get_device_name(0))  # GPU name
print(torch.version.cuda)  # CUDA version PyTorch was compiled with
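
Note (illustrative, not part of the commit): torch.cuda.get_device_name(0) raises on machines without a working CUDA setup, so a guarded variant of the same check is safer:

import torch

if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # GPU name
    print(torch.version.cuda)             # CUDA version PyTorch was compiled with
else:
    print("CUDA not available; inference will fall back to CPU")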

1 feeder/note.txt (Normal file)
@@ -0,0 +1 @@
python simple_track.py --source video/sample.mp4 --show-vid --save-vid --enable-json-log

0 feeder/sender/__init__.py (Normal file)

21 feeder/sender/base.py (Normal file)
@@ -0,0 +1,21 @@

import numpy as np
import json

class NumpyArrayEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NumpyArrayEncoder, self).default(obj)

class BasSender:
    def __init__(self) -> None:
        pass

    def send(self, messages):
        raise NotImplementedError()
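
As a quick illustration (not part of the commit), NumpyArrayEncoder lets numpy scalars and arrays pass straight through json.dumps, which is what the senders below rely on; this assumes feeder/ is on sys.path:

import json
import numpy as np
from sender.base import NumpyArrayEncoder

payload = {'bbox': np.array([10.5, 20.0, 50.5, 80.0]), 'id': np.int64(3)}
print(json.dumps(payload, cls=NumpyArrayEncoder))
# {"bbox": [10.5, 20.0, 50.5, 80.0], "id": 3}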

13 feeder/sender/jsonlogger.py (Normal file)
@@ -0,0 +1,13 @@
from .base import BasSender
from loguru import logger
import json
from .base import NumpyArrayEncoder

class JsonLogger(BasSender):
    def __init__(self, log_filename:str = "tracking.log") -> None:
        super().__init__()
        self.logger = logger
        self.logger.add(log_filename, format="{message}", level="INFO")

    def send(self, messages):
        self.logger.info(json.dumps(messages, cls=NumpyArrayEncoder))
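
Because the loguru sink is configured with format="{message}", each send() call writes one bare JSON object per line, so the log can be parsed back line by line (illustrative snippet, not in the commit):

import json

with open("tracking.log") as f:
    tracks = [json.loads(line) for line in f if line.strip()]
print(len(tracks), "logged track records")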

14 feeder/sender/szmq.py (Normal file)
@@ -0,0 +1,14 @@
from .base import BasSender, NumpyArrayEncoder
import zmq
import json


class ZmqLogger(BasSender):
    def __init__(self, ip_addr:str = "localhost", port:int = 5555) -> None:
        super().__init__()
        self.context = zmq.Context()
        self.producer = self.context.socket(zmq.PUB)
        self.producer.connect(f"tcp://{ip_addr}:{port}")

    def send(self, messages):
        self.producer.send_string(json.dumps(messages, cls = NumpyArrayEncoder))
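
Note that ZmqLogger connect()s its PUB socket rather than binding, so the receiving end is expected to bind. A minimal consumer sketch (illustrative, not part of the commit):

import zmq

context = zmq.Context()
sub = context.socket(zmq.SUB)
sub.bind("tcp://*:5555")                  # the PUB side connects, so the SUB side binds
sub.setsockopt_string(zmq.SUBSCRIBE, "")  # subscribe to every message

while True:
    print(sub.recv_string())              # one JSON-encoded track record per message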

245 feeder/simple_track.py (Normal file)
@@ -0,0 +1,245 @@
import argparse
import cv2
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"

import sys
import numpy as np
from pathlib import Path
import torch

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]
WEIGHTS = ROOT / 'weights'

if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))
if str(ROOT / 'trackers' / 'strongsort') not in sys.path:
    sys.path.append(str(ROOT / 'trackers' / 'strongsort'))

from ultralytics.nn.autobackend import AutoBackend
from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages
from ultralytics.yolo.data.utils import VID_FORMATS
from ultralytics.yolo.utils import LOGGER, colorstr
from ultralytics.yolo.utils.checks import check_file, check_imgsz
from ultralytics.yolo.utils.files import increment_path
from ultralytics.yolo.utils.torch_utils import select_device
from ultralytics.yolo.utils.ops import Profile, non_max_suppression, scale_boxes
from ultralytics.yolo.utils.plotting import Annotator, colors

from trackers.multi_tracker_zoo import create_tracker
from sender.jsonlogger import JsonLogger
from sender.szmq import ZmqLogger

@torch.no_grad()
def run(
        source='0',
        yolo_weights=WEIGHTS / 'yolov8n.pt',
        reid_weights=WEIGHTS / 'osnet_x0_25_msmt17.pt',
        imgsz=(640, 640),
        conf_thres=0.7,
        iou_thres=0.45,
        max_det=1000,
        device='',
        show_vid=True,
        save_vid=True,
        project=ROOT / 'runs' / 'track',
        name='exp',
        exist_ok=False,
        line_thickness=2,
        hide_labels=False,
        hide_conf=False,
        half=False,
        vid_stride=1,
        enable_json_log=False,
        enable_zmq=False,
        zmq_ip='localhost',
        zmq_port=5555,
):
    source = str(source)
    is_file = Path(source).suffix[1:] in (VID_FORMATS)

    if is_file:
        source = check_file(source)

    device = select_device(device)

    model = AutoBackend(yolo_weights, device=device, dnn=False, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_imgsz(imgsz, stride=stride)

    dataset = LoadImages(
        source,
        imgsz=imgsz,
        stride=stride,
        auto=pt,
        transforms=getattr(model.model, 'transforms', None),
        vid_stride=vid_stride
    )
    bs = len(dataset)

    tracking_config = ROOT / 'trackers' / 'strongsort' / 'configs' / 'strongsort.yaml'
    tracker = create_tracker('strongsort', tracking_config, reid_weights, device, half)

    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)
    (save_dir / 'tracks').mkdir(parents=True, exist_ok=True)

    # Initialize loggers
    json_logger = JsonLogger(f"{source}-strongsort.log") if enable_json_log else None
    zmq_logger = ZmqLogger(zmq_ip, zmq_port) if enable_zmq else None

    vid_path, vid_writer = [None] * bs, [None] * bs
    dt = (Profile(), Profile(), Profile())

    for frame_idx, (path, im, im0s, vid_cap, s) in enumerate(dataset):

        with dt[0]:
            im = torch.from_numpy(im).to(model.device)
            im = im.half() if model.fp16 else im.float()
            im /= 255.0
            if len(im.shape) == 3:
                im = im[None]

        with dt[1]:
            pred = model(im, augment=False, visualize=False)

        with dt[2]:
            pred = non_max_suppression(pred, conf_thres, iou_thres, None, False, max_det=max_det)

        for i, det in enumerate(pred):
            seen = 0
            p, im0, _ = path, im0s.copy(), dataset.count
            p = Path(p)

            annotator = Annotator(im0, line_width=line_thickness, example=str(names))

            if len(det):
                # Filter detections for 'car' class only (class 2 in COCO dataset)
                car_mask = det[:, 5] == 2  # car class index is 2
                det = det[car_mask]

            if len(det):
                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()

                for *xyxy, conf, cls in reversed(det):
                    c = int(cls)
                    id = f'{c}'
                    label = None if hide_labels else (f'{id} {names[c]}' if hide_conf else f'{id} {names[c]} {conf:.2f}')
                    annotator.box_label(xyxy, label, color=colors(c, True))

                t_outputs = tracker.update(det.cpu(), im0)

                if len(t_outputs) > 0:
                    for j, (output) in enumerate(t_outputs):
                        bbox = output[0:4]
                        id = output[4]
                        cls = output[5]
                        conf = output[6]

                        # Log tracking data
                        if json_logger or zmq_logger:
                            track_data = {
                                'bbox': bbox.tolist() if hasattr(bbox, 'tolist') else list(bbox),
                                'id': int(id),
                                'cls': int(cls),
                                'conf': float(conf),
                                'frame_idx': frame_idx,
                                'source': source,
                                'class_name': names[int(cls)]
                            }

                            if json_logger:
                                json_logger.send(track_data)
                            if zmq_logger:
                                zmq_logger.send(track_data)

                        if save_vid or show_vid:
                            c = int(cls)
                            id = int(id)
                            label = f'{id} {names[c]}' if not hide_labels else f'{id}'
                            if not hide_conf:
                                label += f' {conf:.2f}'
                            annotator.box_label(bbox, label, color=colors(c, True))

            im0 = annotator.result()

            if show_vid:
                cv2.imshow(str(p), im0)
                if cv2.waitKey(1) == ord('q'):
                    break

            if save_vid:
                if vid_path[i] != str(save_dir / p.name):
                    vid_path[i] = str(save_dir / p.name)
                    if isinstance(vid_writer[i], cv2.VideoWriter):
                        vid_writer[i].release()

                    if vid_cap:
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                    else:
                        fps, w, h = 30, im0.shape[1], im0.shape[0]

                    vid_writer[i] = cv2.VideoWriter(vid_path[i], cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))

                vid_writer[i].write(im0)

        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")

    for i, vid_writer_obj in enumerate(vid_writer):
        if isinstance(vid_writer_obj, cv2.VideoWriter):
            vid_writer_obj.release()

    cv2.destroyAllWindows()

    LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")

def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y

def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--source', type=str, default='0', help='file/dir/URL/glob, 0 for webcam')
    parser.add_argument('--yolo-weights', nargs='+', type=str, default=WEIGHTS / 'yolov8n.pt', help='model path')
    parser.add_argument('--reid-weights', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    parser.add_argument('--conf-thres', type=float, default=0.7, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--show-vid', action='store_true', help='display results')
    parser.add_argument('--save-vid', action='store_true', help='save video tracking results')
    parser.add_argument('--project', default=ROOT / 'runs' / 'track', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
    parser.add_argument('--enable-json-log', action='store_true', help='enable JSON file logging')
    parser.add_argument('--enable-zmq', action='store_true', help='enable ZMQ messaging')
    parser.add_argument('--zmq-ip', type=str, default='localhost', help='ZMQ server IP')
    parser.add_argument('--zmq-port', type=int, default=5555, help='ZMQ server port')

    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1
    return opt

def main(opt):
    run(**vars(opt))

if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
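
For reference (illustrative, not part of the commit), run() can also be driven programmatically with the same parameters parse_opt() exposes, mirroring the command in feeder/note.txt:

from simple_track import run, WEIGHTS

run(source='video/sample.mp4',
    yolo_weights=WEIGHTS / 'yolov8n.pt',
    show_vid=True,
    save_vid=True,
    enable_json_log=True)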

0 feeder/trackers/__init__.py (Normal file)

60 feeder/trackers/botsort/basetrack.py (Normal file)
@@ -0,0 +1,60 @@
import numpy as np
from collections import OrderedDict


class TrackState(object):
    New = 0
    Tracked = 1
    Lost = 2
    LongLost = 3
    Removed = 4


class BaseTrack(object):
    _count = 0

    track_id = 0
    is_activated = False
    state = TrackState.New

    history = OrderedDict()
    features = []
    curr_feature = None
    score = 0
    start_frame = 0
    frame_id = 0
    time_since_update = 0

    # multi-camera
    location = (np.inf, np.inf)

    @property
    def end_frame(self):
        return self.frame_id

    @staticmethod
    def next_id():
        BaseTrack._count += 1
        return BaseTrack._count

    def activate(self, *args):
        raise NotImplementedError

    def predict(self):
        raise NotImplementedError

    def update(self, *args, **kwargs):
        raise NotImplementedError

    def mark_lost(self):
        self.state = TrackState.Lost

    def mark_long_lost(self):
        self.state = TrackState.LongLost

    def mark_removed(self):
        self.state = TrackState.Removed

    @staticmethod
    def clear_count():
        BaseTrack._count = 0
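
BaseTrack is an abstract state holder: concrete trackers override activate/predict/update (STrack in bot_sort.py below does), while next_id() hands out monotonically increasing ids from the shared _count. A toy subclass (hypothetical, for illustration only):

from trackers.botsort.basetrack import BaseTrack, TrackState

class ToyTrack(BaseTrack):
    def activate(self, frame_id):
        self.track_id = self.next_id()
        self.state = TrackState.Tracked
        self.frame_id = self.start_frame = frame_id

t = ToyTrack()
t.activate(frame_id=1)
t.mark_lost()
print(t.track_id, t.state == TrackState.Lost)  # 1 True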

534 feeder/trackers/botsort/bot_sort.py (Normal file)
@@ -0,0 +1,534 @@
import cv2
import matplotlib.pyplot as plt
import numpy as np
from collections import deque

from trackers.botsort import matching
from trackers.botsort.gmc import GMC
from trackers.botsort.basetrack import BaseTrack, TrackState
from trackers.botsort.kalman_filter import KalmanFilter

# from fast_reid.fast_reid_interfece import FastReIDInterface

from reid_multibackend import ReIDDetectMultiBackend
from ultralytics.yolo.utils.ops import xyxy2xywh, xywh2xyxy


class STrack(BaseTrack):
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score, cls, feat=None, feat_history=50):

        # wait activate
        self._tlwh = np.asarray(tlwh, dtype=np.float32)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.cls = -1
        self.cls_hist = []  # (cls id, freq)
        self.update_cls(cls, score)

        self.score = score
        self.tracklet_len = 0

        self.smooth_feat = None
        self.curr_feat = None
        if feat is not None:
            self.update_features(feat)
        self.features = deque([], maxlen=feat_history)
        self.alpha = 0.9

    def update_features(self, feat):
        feat /= np.linalg.norm(feat)
        self.curr_feat = feat
        if self.smooth_feat is None:
            self.smooth_feat = feat
        else:
            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
        self.features.append(feat)
        self.smooth_feat /= np.linalg.norm(self.smooth_feat)

    def update_cls(self, cls, score):
        if len(self.cls_hist) > 0:
            max_freq = 0
            found = False
            for c in self.cls_hist:
                if cls == c[0]:
                    c[1] += score
                    found = True

                if c[1] > max_freq:
                    max_freq = c[1]
                    self.cls = c[0]
            if not found:
                self.cls_hist.append([cls, score])
                self.cls = cls
        else:
            self.cls_hist.append([cls, score])
            self.cls = cls

    def predict(self):
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            mean_state[6] = 0
            mean_state[7] = 0

        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][6] = 0
                    multi_mean[i][7] = 0
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    @staticmethod
    def multi_gmc(stracks, H=np.eye(2, 3)):
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])

            R = H[:2, :2]
            R8x8 = np.kron(np.eye(4, dtype=float), R)
            t = H[:2, 2]

            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                mean = R8x8.dot(mean)
                mean[:2] += t
                cov = R8x8.dot(cov).dot(R8x8.transpose())

                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet"""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()

        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xywh(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        if frame_id == 1:
            self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):

        self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xywh(new_track.tlwh))
        if new_track.curr_feat is not None:
            self.update_features(new_track.curr_feat)
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()
        self.score = new_track.score

        self.update_cls(new_track.cls, new_track.score)

    def update(self, new_track, frame_id):
        """
        Update a matched track
        :type new_track: STrack
        :type frame_id: int
        :type update_feature: bool
        :return:
        """
        self.frame_id = frame_id
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh

        self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xywh(new_tlwh))

        if new_track.curr_feat is not None:
            self.update_features(new_track.curr_feat)

        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        self.update_cls(new_track.cls, new_track.score)

    @property
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
                width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[:2] -= ret[2:] / 2
        return ret

    @property
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @property
    def xywh(self):
        """Convert bounding box to format `(center x, center y, width,
        height)`.
        """
        ret = self.tlwh.copy()
        ret[:2] += ret[2:] / 2.0
        return ret

    @staticmethod
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    @staticmethod
    def tlwh_to_xywh(tlwh):
        """Convert bounding box to format `(center x, center y, width,
        height)`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        return ret

    def to_xywh(self):
        return self.tlwh_to_xywh(self.tlwh)

    @staticmethod
    def tlbr_to_tlwh(tlbr):
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    def tlwh_to_tlbr(tlwh):
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)


class BoTSORT(object):
    def __init__(self,
                model_weights,
                device,
                fp16,
                track_high_thresh:float = 0.45,
                new_track_thresh:float = 0.6,
                track_buffer:int = 30,
                match_thresh:float = 0.8,
                proximity_thresh:float = 0.5,
                appearance_thresh:float = 0.25,
                cmc_method:str = 'sparseOptFlow',
                frame_rate=30,
                lambda_=0.985
                ):

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]
        BaseTrack.clear_count()

        self.frame_id = 0

        self.lambda_ = lambda_
        self.track_high_thresh = track_high_thresh
        self.new_track_thresh = new_track_thresh

        self.buffer_size = int(frame_rate / 30.0 * track_buffer)
        self.max_time_lost = self.buffer_size
        self.kalman_filter = KalmanFilter()

        # ReID module
        self.proximity_thresh = proximity_thresh
        self.appearance_thresh = appearance_thresh
        self.match_thresh = match_thresh

        self.model = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16)

        self.gmc = GMC(method=cmc_method, verbose=[None,False])

    def update(self, output_results, img):
        self.frame_id += 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        xyxys = output_results[:, 0:4]
        xywh = xyxy2xywh(xyxys.numpy())
        confs = output_results[:, 4]
        clss = output_results[:, 5]

        classes = clss.numpy()
        xyxys = xyxys.numpy()
        confs = confs.numpy()

        remain_inds = confs > self.track_high_thresh
        inds_low = confs > 0.1
        inds_high = confs < self.track_high_thresh

        inds_second = np.logical_and(inds_low, inds_high)

        dets_second = xywh[inds_second]
        dets = xywh[remain_inds]

        scores_keep = confs[remain_inds]
        scores_second = confs[inds_second]

        classes_keep = classes[remain_inds]
        clss_second = classes[inds_second]

        self.height, self.width = img.shape[:2]

        '''Extract embeddings '''
        features_keep = self._get_features(dets, img)

        if len(dets) > 0:
            '''Detections'''

            detections = [STrack(xyxy, s, c, f.cpu().numpy()) for
                              (xyxy, s, c, f) in zip(dets, scores_keep, classes_keep, features_keep)]
        else:
            detections = []

        ''' Add newly detected tracklets to tracked_stracks'''
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)

        ''' Step 2: First association, with high score detection boxes'''
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)

        # Predict the current location with KF
        STrack.multi_predict(strack_pool)

        # Fix camera motion
        warp = self.gmc.apply(img, dets)
        STrack.multi_gmc(strack_pool, warp)
        STrack.multi_gmc(unconfirmed, warp)

        # Associate with high score detection boxes
        raw_emb_dists = matching.embedding_distance(strack_pool, detections)
        dists = matching.fuse_motion(self.kalman_filter, raw_emb_dists, strack_pool, detections, only_position=False, lambda_=self.lambda_)

        # ious_dists = matching.iou_distance(strack_pool, detections)
        # ious_dists_mask = (ious_dists > self.proximity_thresh)

        # ious_dists = matching.fuse_score(ious_dists, detections)

        # emb_dists = matching.embedding_distance(strack_pool, detections) / 2.0
        # raw_emb_dists = emb_dists.copy()
        # emb_dists[emb_dists > self.appearance_thresh] = 1.0
        # emb_dists[ious_dists_mask] = 1.0
        # dists = np.minimum(ious_dists, emb_dists)

        # Popular ReID method (JDE / FairMOT)
        # raw_emb_dists = matching.embedding_distance(strack_pool, detections)
        # dists = matching.fuse_motion(self.kalman_filter, raw_emb_dists, strack_pool, detections)
        # emb_dists = dists

        # IoU making ReID
        # dists = matching.embedding_distance(strack_pool, detections)
        # dists[ious_dists_mask] = 1.0

        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.match_thresh)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        ''' Step 3: Second association, with low score detection boxes'''
        # if len(scores):
        #     inds_high = scores < self.track_high_thresh
        #     inds_low = scores > self.track_low_thresh
        #     inds_second = np.logical_and(inds_low, inds_high)
        #     dets_second = bboxes[inds_second]
        #     scores_second = scores[inds_second]
        #     classes_second = classes[inds_second]
        # else:
        #     dets_second = []
        #     scores_second = []
        #     classes_second = []

        # association the untrack to the low score detections
        if len(dets_second) > 0:
            '''Detections'''
            detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s, c) for
                (tlbr, s, c) in zip(dets_second, scores_second, clss_second)]
        else:
            detections_second = []

        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
        dists = matching.iou_distance(r_tracked_stracks, detections_second)
        matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections_second[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        for it in u_track:
            track = r_tracked_stracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)

        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        ious_dists = matching.iou_distance(unconfirmed, detections)
        ious_dists_mask = (ious_dists > self.proximity_thresh)

        ious_dists = matching.fuse_score(ious_dists, detections)

        emb_dists = matching.embedding_distance(unconfirmed, detections) / 2.0
        raw_emb_dists = emb_dists.copy()
        emb_dists[emb_dists > self.appearance_thresh] = 1.0
        emb_dists[ious_dists_mask] = 1.0
        dists = np.minimum(ious_dists, emb_dists)

        matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_starcks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)

        """ Step 4: Init new stracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.new_track_thresh:
                continue

            track.activate(self.kalman_filter, self.frame_id)
            activated_starcks.append(track)

        """ Step 5: Update state"""
        for track in self.lost_stracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_stracks.append(track)

        """ Merge """
        self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
        self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)

        # output_stracks = [track for track in self.tracked_stracks if track.is_activated]
        output_stracks = [track for track in self.tracked_stracks if track.is_activated]
        outputs = []
        for t in output_stracks:
            output = []
            tlwh = t.tlwh
            tid = t.track_id
            tlwh = np.expand_dims(tlwh, axis=0)
            xyxy = xywh2xyxy(tlwh)
            xyxy = np.squeeze(xyxy, axis=0)
            output.extend(xyxy)
            output.append(tid)
            output.append(t.cls)
            output.append(t.score)
            outputs.append(output)

        return outputs

    def _xywh_to_xyxy(self, bbox_xywh):
        x, y, w, h = bbox_xywh
        x1 = max(int(x - w / 2), 0)
        x2 = min(int(x + w / 2), self.width - 1)
        y1 = max(int(y - h / 2), 0)
        y2 = min(int(y + h / 2), self.height - 1)
        return x1, y1, x2, y2

    def _get_features(self, bbox_xywh, ori_img):
        im_crops = []
        for box in bbox_xywh:
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            im_crops.append(im)
        if im_crops:
            features = self.model(im_crops)
        else:
            features = np.array([])
        return features

def joint_stracks(tlista, tlistb):
    exists = {}
    res = []
    for t in tlista:
        exists[t.track_id] = 1
        res.append(t)
    for t in tlistb:
        tid = t.track_id
        if not exists.get(tid, 0):
            exists[tid] = 1
            res.append(t)
    return res


def sub_stracks(tlista, tlistb):
    stracks = {}
    for t in tlista:
        stracks[t.track_id] = t
    for t in tlistb:
        tid = t.track_id
        if stracks.get(tid, 0):
            del stracks[tid]
    return list(stracks.values())


def remove_duplicate_stracks(stracksa, stracksb):
    pdist = matching.iou_distance(stracksa, stracksb)
    pairs = np.where(pdist < 0.15)
    dupa, dupb = list(), list()
    for p, q in zip(*pairs):
        timep = stracksa[p].frame_id - stracksa[p].start_frame
        timeq = stracksb[q].frame_id - stracksb[q].start_frame
        if timep > timeq:
            dupb.append(q)
        else:
            dupa.append(p)
    resa = [t for i, t in enumerate(stracksa) if not i in dupa]
    resb = [t for i, t in enumerate(stracksb) if not i in dupb]
    return resa, resb
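
BoTSORT.update() returns plain lists of the form [x1, y1, x2, y2, track_id, cls, score], so a caller (illustrative, not in the commit; det and im0 as in simple_track.py) can unpack each row directly:

for output in tracker.update(det.cpu(), im0):
    x1, y1, x2, y2, track_id, cls, score = output
    print(int(track_id), f"{score:.2f}", [round(float(v), 1) for v in (x1, y1, x2, y2)])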

13 feeder/trackers/botsort/configs/botsort.yaml (Normal file)
@@ -0,0 +1,13 @@
# Trial number:      232
# HOTA, MOTA, IDF1:  [45.31]
botsort:
  appearance_thresh: 0.4818211117541298
  cmc_method: sparseOptFlow
  conf_thres: 0.3501265956918775
  frame_rate: 30
  lambda_: 0.9896143462366406
  match_thresh: 0.22734550911325851
  new_track_thresh: 0.21144301345190655
  proximity_thresh: 0.5945380911899254
  track_buffer: 60
  track_high_thresh: 0.33824964456239337
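
The YAML keys line up with the BoTSORT constructor arguments above, except conf_thres, which is a detector-side threshold. A sketch of loading it (illustrative; create_tracker, which presumably does this, is not part of this diff):

import yaml
from trackers.botsort.bot_sort import BoTSORT

with open('feeder/trackers/botsort/configs/botsort.yaml') as f:
    cfg = yaml.safe_load(f)['botsort']
cfg.pop('conf_thres', None)  # not a BoTSORT kwarg

tracker = BoTSORT(model_weights='osnet_x0_25_msmt17.pt',  # assumed ReID weights
                  device='cpu', fp16=False, **cfg)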

316 feeder/trackers/botsort/gmc.py (Normal file)
			@ -0,0 +1,316 @@
 | 
			
		|||
import cv2
 | 
			
		||||
import matplotlib.pyplot as plt
 | 
			
		||||
import numpy as np
 | 
			
		||||
import copy
 | 
			
		||||
import time
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GMC:
 | 
			
		||||
    def __init__(self, method='sparseOptFlow', downscale=2, verbose=None):
 | 
			
		||||
        super(GMC, self).__init__()
 | 
			
		||||
 | 
			
		||||
        self.method = method
 | 
			
		||||
        self.downscale = max(1, int(downscale))
 | 
			
		||||
 | 
			
		||||
        if self.method == 'orb':
 | 
			
		||||
            self.detector = cv2.FastFeatureDetector_create(20)
 | 
			
		||||
            self.extractor = cv2.ORB_create()
 | 
			
		||||
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
 | 
			
		||||
 | 
			
		||||
        elif self.method == 'sift':
 | 
			
		||||
            self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
 | 
			
		||||
            self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
 | 
			
		||||
            self.matcher = cv2.BFMatcher(cv2.NORM_L2)
 | 
			
		||||
 | 
			
		||||
        elif self.method == 'ecc':
 | 
			
		||||
            number_of_iterations = 5000
 | 
			
		||||
            termination_eps = 1e-6
 | 
			
		||||
            self.warp_mode = cv2.MOTION_EUCLIDEAN
 | 
			
		||||
            self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
 | 
			
		||||
 | 
			
		||||
        elif self.method == 'sparseOptFlow':
 | 
			
		||||
            self.feature_params = dict(maxCorners=1000, qualityLevel=0.01, minDistance=1, blockSize=3,
 | 
			
		||||
                                       useHarrisDetector=False, k=0.04)
 | 
			
		||||
            # self.gmc_file = open('GMC_results.txt', 'w')
 | 
			
		||||
 | 
			
		||||
        elif self.method == 'file' or self.method == 'files':
 | 
			
		||||
            seqName = verbose[0]
 | 
			
		||||
            ablation = verbose[1]
 | 
			
		||||
            if ablation:
 | 
			
		||||
                filePath = r'tracker/GMC_files/MOT17_ablation'
 | 
			
		||||
            else:
 | 
			
		||||
                filePath = r'tracker/GMC_files/MOTChallenge'
 | 
			
		||||
 | 
			
		||||
            if '-FRCNN' in seqName:
 | 
			
		||||
                seqName = seqName[:-6]
 | 
			
		||||
            elif '-DPM' in seqName:
 | 
			
		||||
                seqName = seqName[:-4]
 | 
			
		||||
            elif '-SDP' in seqName:
 | 
			
		||||
                seqName = seqName[:-4]
 | 
			
		||||
 | 
			
		||||
            self.gmcFile = open(filePath + "/GMC-" + seqName + ".txt", 'r')
 | 
			
		||||
 | 
			
		||||
            if self.gmcFile is None:
 | 
			
		||||
                raise ValueError("Error: Unable to open GMC file in directory:" + filePath)
 | 
			
		||||
        elif self.method == 'none' or self.method == 'None':
 | 
			
		||||
            self.method = 'none'
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError("Error: Unknown CMC method:" + method)
 | 
			
		||||
 | 
			
		||||
        self.prevFrame = None
 | 
			
		||||
        self.prevKeyPoints = None
 | 
			
		||||
        self.prevDescriptors = None
 | 
			
		||||
 | 
			
		||||
        self.initializedFirstFrame = False
 | 
			
		||||
 | 
			
		||||
    def apply(self, raw_frame, detections=None):
 | 
			
		||||
        if self.method == 'orb' or self.method == 'sift':
 | 
			
		||||
            return self.applyFeaures(raw_frame, detections)
 | 
			
		||||
        elif self.method == 'ecc':
 | 
			
		||||
            return self.applyEcc(raw_frame, detections)
 | 
			
		||||
        elif self.method == 'sparseOptFlow':
 | 
			
		||||
            return self.applySparseOptFlow(raw_frame, detections)
 | 
			
		||||
        elif self.method == 'file':
 | 
			
		||||
            return self.applyFile(raw_frame, detections)
 | 
			
		||||
        elif self.method == 'none':
 | 
			
		||||
            return np.eye(2, 3)
 | 
			
		||||
        else:
 | 
			
		||||
            return np.eye(2, 3)
 | 
			
		||||
 | 
			
		||||
    def applyEcc(self, raw_frame, detections=None):

        # Initialize
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3, dtype=np.float32)

        # Downscale image (TODO: consider using pyramids)
        if self.downscale > 1.0:
            frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # Handle first frame
        if not self.initializedFirstFrame:
            # Initialize data
            self.prevFrame = frame.copy()

            # Initialization done
            self.initializedFirstFrame = True

            return H

        # Run the ECC algorithm. The results are stored in warp_matrix.
        # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)
        try:
            (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)
        except cv2.error:
            print('Warning: find transform failed. Set warp as identity')

        return H

    def applyFeatures(self, raw_frame, detections=None):

        # Initialize
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image (TODO: consider using pyramids)
        if self.downscale > 1.0:
            # frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # find the keypoints
        mask = np.zeros_like(frame)
        # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255
        mask[int(0.02 * height): int(0.98 * height), int(0.02 * width): int(0.98 * width)] = 255
        if detections is not None:
            for det in detections:
                tlbr = (det[:4] / self.downscale).astype(np.int_)
                mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0

        keypoints = self.detector.detect(frame, mask)

        # compute the descriptors
        keypoints, descriptors = self.extractor.compute(frame, keypoints)

        # Handle first frame
        if not self.initializedFirstFrame:
            # Initialize data
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)

            # Initialization done
            self.initializedFirstFrame = True

            return H

        # Match descriptors.
        knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)

        # Filter matches based on smallest spatial distance
        matches = []
        spatialDistances = []

        maxSpatialDistance = 0.25 * np.array([width, height])

        # Handle empty matches case
        if len(knnMatches) == 0:
            # Store to next iteration
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)

            return H

        for m, n in knnMatches:
            if m.distance < 0.9 * n.distance:
                prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt
                currKeyPointLocation = keypoints[m.trainIdx].pt

                spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0],
                                   prevKeyPointLocation[1] - currKeyPointLocation[1])

                if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \
                        (np.abs(spatialDistance[1]) < maxSpatialDistance[1]):
                    spatialDistances.append(spatialDistance)
                    matches.append(m)

        meanSpatialDistances = np.mean(spatialDistances, 0)
        stdSpatialDistances = np.std(spatialDistances, 0)

        inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances

        goodMatches = []
        prevPoints = []
        currPoints = []
        for i in range(len(matches)):
            if inliers[i, 0] and inliers[i, 1]:
                goodMatches.append(matches[i])
                prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
                currPoints.append(keypoints[matches[i].trainIdx].pt)

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Draw the keypoint matches on the output image
        if False:  # set to True to visualize matches for debugging
            matches_img = np.hstack((self.prevFrame, frame))
            matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)
            W = np.size(self.prevFrame, 1)
            for m in goodMatches:
                prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)
                curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)
                curr_pt[0] += W
                color = np.random.randint(0, 255, (3,))
                color = (int(color[0]), int(color[1]), int(color[2]))

                matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)
                matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)
                matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)

            plt.figure()
            plt.imshow(matches_img)
            plt.show()

        # Find rigid matrix
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(currPoints, 0)):
            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            # Handle downscale
            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            print('Warning: not enough matching points')

        # Store to next iteration
        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)
        self.prevDescriptors = copy.copy(descriptors)

        return H

    def applySparseOptFlow(self, raw_frame, detections=None):

        t0 = time.time()

        # Initialize
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image
        if self.downscale > 1.0:
            # frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))

        # find the keypoints
        keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params)

        # Handle first frame
        if not self.initializedFirstFrame:
            # Initialize data
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)

            # Initialization done
            self.initializedFirstFrame = True

            return H

        # find correspondences
        matchedKeypoints, status, err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None)

        # leave good correspondences only
        prevPoints = []
        currPoints = []

        for i in range(len(status)):
            if status[i]:
                prevPoints.append(self.prevKeyPoints[i])
                currPoints.append(matchedKeypoints[i])

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Find rigid matrix
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(currPoints, 0)):
            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            # Handle downscale
            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            print('Warning: not enough matching points')

        # Store to next iteration
        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)

        t1 = time.time()

        # gmc_line = str(1000 * (t1 - t0)) + "\t" + str(H[0, 0]) + "\t" + str(H[0, 1]) + "\t" + str(
        #     H[0, 2]) + "\t" + str(H[1, 0]) + "\t" + str(H[1, 1]) + "\t" + str(H[1, 2]) + "\n"
        # self.gmc_file.write(gmc_line)

        return H

    def applyFile(self, raw_frame, detections=None):
        line = self.gmcFile.readline()
        tokens = line.split("\t")
        H = np.eye(2, 3, dtype=np.float64)  # np.float_ was removed in NumPy 2.0
        H[0, 0] = float(tokens[1])
        H[0, 1] = float(tokens[2])
        H[0, 2] = float(tokens[3])
        H[1, 0] = float(tokens[4])
        H[1, 1] = float(tokens[5])
        H[1, 2] = float(tokens[6])

        return H
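
Usage sketch for the camera-motion-compensation class above (a minimal
example, assuming the class is named GMC and its constructor takes the
method/downscale arguments the branches above dispatch on; the video path
is hypothetical):

import cv2
from trackers.botsort.gmc import GMC

gmc = GMC(method='sparseOptFlow', downscale=2)
cap = cv2.VideoCapture('video/sample.mp4')  # hypothetical input
while True:
    ok, frame = cap.read()
    if not ok:
        break
    H = gmc.apply(frame)  # 2x3 affine warp of this frame w.r.t. the previous one
cap.release()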
269 feeder/trackers/botsort/kalman_filter.py Normal file
@ -0,0 +1,269 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg


"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
    1: 3.8415,
    2: 5.9915,
    3: 7.8147,
    4: 9.4877,
    5: 11.070,
    6: 12.592,
    7: 14.067,
    8: 15.507,
    9: 16.919}


class KalmanFilter(object):
    """
    A simple Kalman filter for tracking bounding boxes in image space.

    The 8-dimensional state space

        x, y, w, h, vx, vy, vw, vh

    contains the bounding box center position (x, y), width w, height h,
    and their respective velocities.

    Object motion follows a constant velocity model. The bounding box location
    (x, y, w, h) is taken as direct observation of the state space (linear
    observation model).

    """

    def __init__(self):
        ndim, dt = 4, 1.

        # Create Kalman filter model matrices.
        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
        for i in range(ndim):
            self._motion_mat[i, ndim + i] = dt
        self._update_mat = np.eye(ndim, 2 * ndim)

        # Motion and observation uncertainty are chosen relative to the current
        # state estimate. These weights control the amount of uncertainty in
        # the model. This is a bit hacky.
        self._std_weight_position = 1. / 20
        self._std_weight_velocity = 1. / 160

    def initiate(self, measurement):
        """Create track from unassociated measurement.

        Parameters
        ----------
        measurement : ndarray
            Bounding box coordinates (x, y, w, h) with center position (x, y),
            width w, and height h.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector (8 dimensional) and covariance matrix (8x8
            dimensional) of the new track. Unobserved velocities are initialized
            to 0 mean.

        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        std = [
            2 * self._std_weight_position * measurement[2],
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[2],
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[2],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[2],
            10 * self._std_weight_velocity * measurement[3]]
        covariance = np.diag(np.square(std))
        return mean, covariance

    def predict(self, mean, covariance):
        """Run Kalman filter prediction step.

        Parameters
        ----------
        mean : ndarray
            The 8 dimensional mean vector of the object state at the previous
            time step.
        covariance : ndarray
            The 8x8 dimensional covariance matrix of the object state at the
            previous time step.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.

        """
        std_pos = [
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3]]
        std_vel = [
            self._std_weight_velocity * mean[2],
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[2],
            self._std_weight_velocity * mean[3]]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        mean = np.dot(mean, self._motion_mat.T)
        covariance = np.linalg.multi_dot((
            self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance

    def project(self, mean, covariance):
        """Project state distribution to measurement space.

        Parameters
        ----------
        mean : ndarray
            The state's mean vector (8 dimensional array).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).

        Returns
        -------
        (ndarray, ndarray)
            Returns the projected mean and covariance matrix of the given state
            estimate.

        """
        std = [
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3]]
        innovation_cov = np.diag(np.square(std))

        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((
            self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

    def multi_predict(self, mean, covariance):
        """Run Kalman filter prediction step (Vectorized version).
        Parameters
        ----------
        mean : ndarray
            The Nx8 dimensional mean matrix of the object states at the previous
            time step.
        covariance : ndarray
            The Nx8x8 dimensional covariance matrices of the object states at the
            previous time step.
        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.
        """
        std_pos = [
            self._std_weight_position * mean[:, 2],
            self._std_weight_position * mean[:, 3],
            self._std_weight_position * mean[:, 2],
            self._std_weight_position * mean[:, 3]]
        std_vel = [
            self._std_weight_velocity * mean[:, 2],
            self._std_weight_velocity * mean[:, 3],
            self._std_weight_velocity * mean[:, 2],
            self._std_weight_velocity * mean[:, 3]]
        sqr = np.square(np.r_[std_pos, std_vel]).T

        motion_cov = []
        for i in range(len(mean)):
            motion_cov.append(np.diag(sqr[i]))
        motion_cov = np.asarray(motion_cov)

        mean = np.dot(mean, self._motion_mat.T)
        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
        covariance = np.dot(left, self._motion_mat.T) + motion_cov

        return mean, covariance

    def update(self, mean, covariance, measurement):
        """Run Kalman filter correction step.

        Parameters
        ----------
        mean : ndarray
            The predicted state's mean vector (8 dimensional).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        measurement : ndarray
            The 4 dimensional measurement vector (x, y, w, h), where (x, y)
            is the center position, w the width, and h the height of the
            bounding box.

        Returns
        -------
        (ndarray, ndarray)
            Returns the measurement-corrected state distribution.

        """
        projected_mean, projected_cov = self.project(mean, covariance)

        chol_factor, lower = scipy.linalg.cho_factor(
            projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve(
            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
            check_finite=False).T
        innovation = measurement - projected_mean

        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((
            kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance

    def gating_distance(self, mean, covariance, measurements,
                        only_position=False, metric='maha'):
        """Compute gating distance between state distribution and measurements.
        A suitable distance threshold can be obtained from `chi2inv95`. If
        `only_position` is False, the chi-square distribution has 4 degrees of
        freedom, otherwise 2.
        Parameters
        ----------
        mean : ndarray
            Mean vector over the state distribution (8 dimensional).
        covariance : ndarray
            Covariance of the state distribution (8x8 dimensional).
        measurements : ndarray
            An Nx4 dimensional matrix of N measurements, each in
            format (x, y, a, h) where (x, y) is the bounding box center
            position, a the aspect ratio, and h the height.
        only_position : Optional[bool]
            If True, distance computation is done with respect to the bounding
            box center position only.
        Returns
        -------
        ndarray
            Returns an array of length N, where the i-th element contains the
            squared Mahalanobis distance between (mean, covariance) and
            `measurements[i]`.
        """
        mean, covariance = self.project(mean, covariance)
        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]

        d = measurements - mean
        if metric == 'gaussian':
            return np.sum(d * d, axis=1)
        elif metric == 'maha':
            cholesky_factor = np.linalg.cholesky(covariance)
            z = scipy.linalg.solve_triangular(
                cholesky_factor, d.T, lower=True, check_finite=False,
                overwrite_b=True)
            squared_maha = np.sum(z * z, axis=0)
            return squared_maha
        else:
            raise ValueError('invalid distance metric')
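
A short sketch of the predict/update cycle for the filter above; the
measurement values are made-up (x, y, w, h) boxes:

import numpy as np
from trackers.botsort.kalman_filter import KalmanFilter

kf = KalmanFilter()
mean, cov = kf.initiate(np.array([320., 240., 64., 128.]))  # first (x, y, w, h) box
mean, cov = kf.predict(mean, cov)                           # propagate one frame
mean, cov = kf.update(mean, cov, np.array([324., 242., 64., 130.]))  # correct with a new box
print(mean[:4])  # filtered (x, y, w, h) estimate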
234 feeder/trackers/botsort/matching.py Normal file
@ -0,0 +1,234 @@
import numpy as np
import scipy.sparse
import lap
from scipy.spatial.distance import cdist

from trackers.botsort import kalman_filter


def merge_matches(m1, m2, shape):
    O, P, Q = shape
    m1 = np.asarray(m1)
    m2 = np.asarray(m2)

    M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
    M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))

    mask = M1 * M2
    match = mask.nonzero()
    match = list(zip(match[0], match[1]))
    unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
    unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))

    return match, unmatched_O, unmatched_Q


def _indices_to_matches(cost_matrix, indices, thresh):
    matched_cost = cost_matrix[tuple(zip(*indices))]
    matched_mask = (matched_cost <= thresh)

    matches = indices[matched_mask]
    unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0]))
    unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1]))

    return matches, unmatched_a, unmatched_b


def linear_assignment(cost_matrix, thresh):
    if cost_matrix.size == 0:
        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
    matches, unmatched_a, unmatched_b = [], [], []
    cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
    for ix, mx in enumerate(x):
        if mx >= 0:
            matches.append([ix, mx])
    unmatched_a = np.where(x < 0)[0]
    unmatched_b = np.where(y < 0)[0]
    matches = np.asarray(matches)
    return matches, unmatched_a, unmatched_b


def ious(atlbrs, btlbrs):
    """
    Compute cost based on IoU
    :type atlbrs: list[tlbr] | np.ndarray
    :type btlbrs: list[tlbr] | np.ndarray

    :rtype ious np.ndarray
    """
    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
    if ious.size == 0:
        return ious

    ious = bbox_ious(
        np.ascontiguousarray(atlbrs, dtype=np.float32),
        np.ascontiguousarray(btlbrs, dtype=np.float32)
    )

    return ious


def tlbr_expand(tlbr, scale=1.2):
    w = tlbr[2] - tlbr[0]
    h = tlbr[3] - tlbr[1]

    half_scale = 0.5 * scale

    tlbr[0] -= half_scale * w
    tlbr[1] -= half_scale * h
    tlbr[2] += half_scale * w
    tlbr[3] += half_scale * h

    return tlbr


def iou_distance(atracks, btracks):
    """
    Compute cost based on IoU
    :type atracks: list[STrack]
    :type btracks: list[STrack]

    :rtype cost_matrix np.ndarray
    """

    if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
        atlbrs = atracks
        btlbrs = btracks
    else:
        atlbrs = [track.tlbr for track in atracks]
        btlbrs = [track.tlbr for track in btracks]
    _ious = ious(atlbrs, btlbrs)
    cost_matrix = 1 - _ious

    return cost_matrix


def v_iou_distance(atracks, btracks):
    """
    Compute cost based on IoU
    :type atracks: list[STrack]
    :type btracks: list[STrack]

    :rtype cost_matrix np.ndarray
    """

    if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
        atlbrs = atracks
        btlbrs = btracks
    else:
        atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks]
        btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks]
    _ious = ious(atlbrs, btlbrs)
    cost_matrix = 1 - _ious

    return cost_matrix


def embedding_distance(tracks, detections, metric='cosine'):
    """
    :param tracks: list[STrack]
    :param detections: list[BaseTrack]
    :param metric:
    :return: cost_matrix np.ndarray
    """

    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32)
    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32)

    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))  # / 2.0  # Normalized features
    return cost_matrix


def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):
    if cost_matrix.size == 0:
        return cost_matrix
    gating_dim = 2 if only_position else 4
    gating_threshold = kalman_filter.chi2inv95[gating_dim]
    # measurements = np.asarray([det.to_xyah() for det in detections])
    measurements = np.asarray([det.to_xywh() for det in detections])
    for row, track in enumerate(tracks):
        gating_distance = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position)
        cost_matrix[row, gating_distance > gating_threshold] = np.inf
    return cost_matrix


def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):
    if cost_matrix.size == 0:
        return cost_matrix
    gating_dim = 2 if only_position else 4
    gating_threshold = kalman_filter.chi2inv95[gating_dim]
    # measurements = np.asarray([det.to_xyah() for det in detections])
    measurements = np.asarray([det.to_xywh() for det in detections])
    for row, track in enumerate(tracks):
        gating_distance = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position, metric='maha')
        cost_matrix[row, gating_distance > gating_threshold] = np.inf
        cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance
    return cost_matrix


def fuse_iou(cost_matrix, tracks, detections):
    if cost_matrix.size == 0:
        return cost_matrix
    reid_sim = 1 - cost_matrix
    iou_dist = iou_distance(tracks, detections)
    iou_sim = 1 - iou_dist
    fuse_sim = reid_sim * (1 + iou_sim) / 2
    det_scores = np.array([det.score for det in detections])
    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
    # fuse_sim = fuse_sim * (1 + det_scores) / 2
    fuse_cost = 1 - fuse_sim
    return fuse_cost


def fuse_score(cost_matrix, detections):
    if cost_matrix.size == 0:
        return cost_matrix
    iou_sim = 1 - cost_matrix
    det_scores = np.array([det.score for det in detections])
    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
    fuse_sim = iou_sim * det_scores
    fuse_cost = 1 - fuse_sim
    return fuse_cost


def bbox_ious(boxes, query_boxes):
    """
    Parameters
    ----------
    boxes: (N, 4) ndarray of float
    query_boxes: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=np.float32)

    for k in range(K):
        box_area = (
            (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
            (query_boxes[k, 3] - query_boxes[k, 1] + 1)
        )
        for n in range(N):
            iw = (
                min(boxes[n, 2], query_boxes[k, 2]) -
                max(boxes[n, 0], query_boxes[k, 0]) + 1
            )
            if iw > 0:
                ih = (
                    min(boxes[n, 3], query_boxes[k, 3]) -
                    max(boxes[n, 1], query_boxes[k, 1]) + 1
                )
                if ih > 0:
                    ua = float(
                        (boxes[n, 2] - boxes[n, 0] + 1) *
                        (boxes[n, 3] - boxes[n, 1] + 1) +
                        box_area - iw * ih
                    )
                    overlaps[n, k] = iw * ih / ua
    return overlaps
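
A sketch of how the helpers above combine the IoU cost with the LAP solver
(the tlbr boxes are made up; requires the lap package):

import numpy as np
from trackers.botsort import matching

tracks = np.array([[10., 10., 50., 80.], [100., 40., 140., 110.]])  # tlbr per track
dets = np.array([[12., 11., 52., 82.], [200., 50., 240., 120.]])    # tlbr per detection
cost = matching.iou_distance(tracks, dets)                          # 1 - IoU
matches, u_track, u_det = matching.linear_assignment(cost, thresh=0.8)
print(matches, u_track, u_det)  # expect track 0 <-> det 0; track 1 and det 1 unmatched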
52 feeder/trackers/bytetrack/basetrack.py Normal file
@ -0,0 +1,52 @@
import numpy as np
from collections import OrderedDict


class TrackState(object):
    New = 0
    Tracked = 1
    Lost = 2
    Removed = 3


class BaseTrack(object):
    _count = 0

    track_id = 0
    is_activated = False
    state = TrackState.New

    history = OrderedDict()
    features = []
    curr_feature = None
    score = 0
    start_frame = 0
    frame_id = 0
    time_since_update = 0

    # multi-camera
    location = (np.inf, np.inf)

    @property
    def end_frame(self):
        return self.frame_id

    @staticmethod
    def next_id():
        BaseTrack._count += 1
        return BaseTrack._count

    def activate(self, *args):
        raise NotImplementedError

    def predict(self):
        raise NotImplementedError

    def update(self, *args, **kwargs):
        raise NotImplementedError

    def mark_lost(self):
        self.state = TrackState.Lost

    def mark_removed(self):
        self.state = TrackState.Removed
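
BaseTrack only does bookkeeping (shared id counter, state flags); concrete
trackers subclass it, as STrack in byte_tracker.py below does. A toy
illustration of the lifecycle:

from trackers.bytetrack.basetrack import BaseTrack, TrackState

class ToyTrack(BaseTrack):  # hypothetical minimal subclass
    def activate(self, frame_id):
        self.track_id = self.next_id()  # unique id from the shared counter
        self.state = TrackState.Tracked
        self.frame_id = self.start_frame = frame_id

t = ToyTrack()
t.activate(frame_id=1)
t.mark_lost()
print(t.track_id, t.state == TrackState.Lost)  # 1 True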
348 feeder/trackers/bytetrack/byte_tracker.py Normal file
@ -0,0 +1,348 @@
import numpy as np

from ultralytics.yolo.utils.ops import xywh2xyxy, xyxy2xywh

from trackers.bytetrack.kalman_filter import KalmanFilter
from trackers.bytetrack import matching
from trackers.bytetrack.basetrack import BaseTrack, TrackState


class STrack(BaseTrack):
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score, cls):

        # wait activate
        self._tlwh = np.asarray(tlwh, dtype=np.float32)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.score = score
        self.tracklet_len = 0
        self.cls = cls

    def predict(self):
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][7] = 0
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet"""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        if frame_id == 1:
            self.is_activated = True
        # self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
        )
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()
        self.score = new_track.score
        self.cls = new_track.cls

    def update(self, new_track, frame_id):
        """
        Update a matched track
        :type new_track: STrack
        :type frame_id: int
        :return:
        """
        self.frame_id = frame_id
        self.tracklet_len += 1
        # self.cls = cls

        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score

    @property
    # @jit(nopython=True)
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
                width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]
        ret[:2] -= ret[2:] / 2
        return ret

    @property
    # @jit(nopython=True)
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    def to_xyah(self):
        return self.tlwh_to_xyah(self.tlwh)

    @staticmethod
    # @jit(nopython=True)
    def tlbr_to_tlwh(tlbr):
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_tlbr(tlwh):
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)


class BYTETracker(object):
    def __init__(self, track_thresh=0.45, match_thresh=0.8, track_buffer=25, frame_rate=30):
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.track_buffer = track_buffer

        self.track_thresh = track_thresh
        self.match_thresh = match_thresh
        self.det_thresh = track_thresh + 0.1
        self.buffer_size = int(frame_rate / 30.0 * track_buffer)
        self.max_time_lost = self.buffer_size
        self.kalman_filter = KalmanFilter()

    def update(self, dets, _):
        self.frame_id += 1
        activated_stracks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        xyxys = dets[:, 0:4]
        xywh = xyxy2xywh(xyxys.numpy())
        confs = dets[:, 4]
        clss = dets[:, 5]

        classes = clss.numpy()
        xyxys = xyxys.numpy()
        confs = confs.numpy()

        remain_inds = confs > self.track_thresh
        inds_low = confs > 0.1
        inds_high = confs < self.track_thresh

        inds_second = np.logical_and(inds_low, inds_high)

        dets_second = xywh[inds_second]
        dets = xywh[remain_inds]

        scores_keep = confs[remain_inds]
        scores_second = confs[inds_second]

        clss_keep = classes[remain_inds]
        clss_second = classes[inds_second]

        if len(dets) > 0:
            '''Detections'''
            detections = [STrack(xywh_box, s, c) for
                          (xywh_box, s, c) in zip(dets, scores_keep, clss_keep)]
        else:
            detections = []

        ''' Add newly detected tracklets to tracked_stracks'''
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)

        ''' Step 2: First association, with high score detection boxes'''
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict the current location with KF
        STrack.multi_predict(strack_pool)
        dists = matching.iou_distance(strack_pool, detections)
        # if not self.args.mot20:
        dists = matching.fuse_score(dists, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.match_thresh)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self.frame_id)
                activated_stracks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        ''' Step 3: Second association, with low score detection boxes'''
        # associate the untracked tracks to the low score detections
        if len(dets_second) > 0:
            '''Detections'''
            detections_second = [STrack(xywh_box, s, c) for (xywh_box, s, c) in zip(dets_second, scores_second, clss_second)]
        else:
            detections_second = []
        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
        dists = matching.iou_distance(r_tracked_stracks, detections_second)
        matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections_second[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_stracks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        for it in u_track:
            track = r_tracked_stracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)

        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        # if not self.args.mot20:
        dists = matching.fuse_score(dists, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_stracks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)

        """ Step 4: Init new stracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.det_thresh:
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_stracks.append(track)
        """ Step 5: Update state"""
        for track in self.lost_stracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_stracks.append(track)

        # print('Remained match {} s'.format(t4-t3))

        self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
        self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_stracks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
        # get scores of lost tracks
        output_stracks = [track for track in self.tracked_stracks if track.is_activated]
        outputs = []
        for t in output_stracks:
            output = []
            tlwh = t.tlwh
            tid = t.track_id
            tlwh = np.expand_dims(tlwh, axis=0)
            xyxy = xywh2xyxy(tlwh)
            xyxy = np.squeeze(xyxy, axis=0)
            output.extend(xyxy)
            output.append(tid)
            output.append(t.cls)
            output.append(t.score)
            outputs.append(output)

        return outputs
# each output row: x1, y1, x2, y2, track_id, class_id, conf


def joint_stracks(tlista, tlistb):
    exists = {}
    res = []
    for t in tlista:
        exists[t.track_id] = 1
        res.append(t)
    for t in tlistb:
        tid = t.track_id
        if not exists.get(tid, 0):
            exists[tid] = 1
            res.append(t)
    return res


def sub_stracks(tlista, tlistb):
    stracks = {}
    for t in tlista:
        stracks[t.track_id] = t
    for t in tlistb:
        tid = t.track_id
        if stracks.get(tid, 0):
            del stracks[tid]
    return list(stracks.values())


def remove_duplicate_stracks(stracksa, stracksb):
    pdist = matching.iou_distance(stracksa, stracksb)
    pairs = np.where(pdist < 0.15)
    dupa, dupb = list(), list()
    for p, q in zip(*pairs):
        timep = stracksa[p].frame_id - stracksa[p].start_frame
        timeq = stracksb[q].frame_id - stracksb[q].start_frame
        if timep > timeq:
            dupb.append(q)
        else:
            dupa.append(p)
    resa = [t for i, t in enumerate(stracksa) if i not in dupa]
    resb = [t for i, t in enumerate(stracksb) if i not in dupb]
    return resa, resb
 | 
			
		||||
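Note: joint_stracks is effectively a set union keyed on track_id (entries from tlista win on collision), sub_stracks a set difference, and remove_duplicate_stracks keeps whichever of two overlapping tracks (IoU distance below 0.15) has the longer history. A minimal sketch of the union/difference semantics, assuming it runs in this module; SimpleNamespace is a hypothetical stand-in for STrack, not part of the tracker code:

from types import SimpleNamespace

# Hypothetical stand-ins for STrack objects; only track_id matters here.
a = [SimpleNamespace(track_id=1), SimpleNamespace(track_id=2)]
b = [SimpleNamespace(track_id=2), SimpleNamespace(track_id=3)]

union = joint_stracks(a, b)   # track_ids [1, 2, 3]; the id-2 entry comes from a
diff = sub_stracks(union, b)  # track_ids [1]; every id also present in b is dropped
print([t.track_id for t in union], [t.track_id for t in diff])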
7  feeder/trackers/bytetrack/configs/bytetrack.yaml  Normal file
@@ -0,0 +1,7 @@
bytetrack:
  track_thresh: 0.6  # tracking confidence threshold
  track_buffer: 30   # number of frames to keep lost tracks
  match_thresh: 0.8  # matching threshold for tracking
  frame_rate: 30     # FPS
  conf_thres: 0.5122620708221085
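For reference, a minimal sketch of reading this config with PyYAML; the load path and key access here are assumptions for illustration, not necessarily how the feeder consumes it:

import yaml

with open("feeder/trackers/bytetrack/configs/bytetrack.yaml") as f:
    cfg = yaml.safe_load(f)["bytetrack"]
print(cfg["track_thresh"], cfg["track_buffer"], cfg["match_thresh"])  # 0.6 30 0.8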
270  feeder/trackers/bytetrack/kalman_filter.py  Normal file
@@ -0,0 +1,270 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg


"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
    1: 3.8415,
    2: 5.9915,
    3: 7.8147,
    4: 9.4877,
    5: 11.070,
    6: 12.592,
    7: 14.067,
    8: 15.507,
    9: 16.919}


class KalmanFilter(object):
    """
    A simple Kalman filter for tracking bounding boxes in image space.

    The 8-dimensional state space

        x, y, a, h, vx, vy, va, vh

    contains the bounding box center position (x, y), aspect ratio a, height h,
    and their respective velocities.

    Object motion follows a constant velocity model. The bounding box location
    (x, y, a, h) is taken as direct observation of the state space (linear
    observation model).

    """

    def __init__(self):
        ndim, dt = 4, 1.

        # Create Kalman filter model matrices.
        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
        for i in range(ndim):
            self._motion_mat[i, ndim + i] = dt
        self._update_mat = np.eye(ndim, 2 * ndim)

        # Motion and observation uncertainty are chosen relative to the current
        # state estimate. These weights control the amount of uncertainty in
        # the model. This is a bit hacky.
        self._std_weight_position = 1. / 20
        self._std_weight_velocity = 1. / 160

    def initiate(self, measurement):
        """Create track from unassociated measurement.

        Parameters
        ----------
        measurement : ndarray
            Bounding box coordinates (x, y, a, h) with center position (x, y),
            aspect ratio a, and height h.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector (8 dimensional) and covariance matrix (8x8
            dimensional) of the new track. Unobserved velocities are initialized
            to 0 mean.

        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        std = [
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[3],
            1e-2,
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            1e-5,
            10 * self._std_weight_velocity * measurement[3]]
        covariance = np.diag(np.square(std))
        return mean, covariance

    def predict(self, mean, covariance):
        """Run Kalman filter prediction step.

        Parameters
        ----------
        mean : ndarray
            The 8 dimensional mean vector of the object state at the previous
            time step.
        covariance : ndarray
            The 8x8 dimensional covariance matrix of the object state at the
            previous time step.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.

        """
        std_pos = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-2,
            self._std_weight_position * mean[3]]
        std_vel = [
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[3],
            1e-5,
            self._std_weight_velocity * mean[3]]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        # mean = np.dot(self._motion_mat, mean)
        mean = np.dot(mean, self._motion_mat.T)
        covariance = np.linalg.multi_dot((
            self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance

    def project(self, mean, covariance):
        """Project state distribution to measurement space.

        Parameters
        ----------
        mean : ndarray
            The state's mean vector (8 dimensional array).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).

        Returns
        -------
        (ndarray, ndarray)
            Returns the projected mean and covariance matrix of the given state
            estimate.

        """
        std = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-1,
            self._std_weight_position * mean[3]]
        innovation_cov = np.diag(np.square(std))

        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((
            self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

    def multi_predict(self, mean, covariance):
        """Run Kalman filter prediction step (vectorized version).

        Parameters
        ----------
        mean : ndarray
            The Nx8 dimensional mean matrix of the object states at the previous
            time step.
        covariance : ndarray
            The Nx8x8 dimensional covariance matrices of the object states at the
            previous time step.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.

        """
        std_pos = [
            self._std_weight_position * mean[:, 3],
            self._std_weight_position * mean[:, 3],
            1e-2 * np.ones_like(mean[:, 3]),
            self._std_weight_position * mean[:, 3]]
        std_vel = [
            self._std_weight_velocity * mean[:, 3],
            self._std_weight_velocity * mean[:, 3],
            1e-5 * np.ones_like(mean[:, 3]),
            self._std_weight_velocity * mean[:, 3]]
        sqr = np.square(np.r_[std_pos, std_vel]).T

        motion_cov = []
        for i in range(len(mean)):
            motion_cov.append(np.diag(sqr[i]))
        motion_cov = np.asarray(motion_cov)

        mean = np.dot(mean, self._motion_mat.T)
        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
        covariance = np.dot(left, self._motion_mat.T) + motion_cov

        return mean, covariance

    def update(self, mean, covariance, measurement):
        """Run Kalman filter correction step.

        Parameters
        ----------
        mean : ndarray
            The predicted state's mean vector (8 dimensional).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        measurement : ndarray
            The 4 dimensional measurement vector (x, y, a, h), where (x, y)
            is the center position, a the aspect ratio, and h the height of the
            bounding box.

        Returns
        -------
        (ndarray, ndarray)
            Returns the measurement-corrected state distribution.

        """
        projected_mean, projected_cov = self.project(mean, covariance)

        chol_factor, lower = scipy.linalg.cho_factor(
            projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve(
            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
            check_finite=False).T
        innovation = measurement - projected_mean

        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((
            kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance

    def gating_distance(self, mean, covariance, measurements,
                        only_position=False, metric='maha'):
        """Compute gating distance between state distribution and measurements.
        A suitable distance threshold can be obtained from `chi2inv95`. If
        `only_position` is False, the chi-square distribution has 4 degrees of
        freedom, otherwise 2.
        Parameters
        ----------
        mean : ndarray
            Mean vector over the state distribution (8 dimensional).
        covariance : ndarray
            Covariance of the state distribution (8x8 dimensional).
        measurements : ndarray
            An Nx4 dimensional matrix of N measurements, each in
            format (x, y, a, h) where (x, y) is the bounding box center
            position, a the aspect ratio, and h the height.
        only_position : Optional[bool]
            If True, distance computation is done with respect to the bounding
            box center position only.
        Returns
        -------
        ndarray
            Returns an array of length N, where the i-th element contains the
            squared Mahalanobis distance between (mean, covariance) and
            `measurements[i]`.
        """
        mean, covariance = self.project(mean, covariance)
        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]

        d = measurements - mean
        if metric == 'gaussian':
            return np.sum(d * d, axis=1)
        elif metric == 'maha':
            cholesky_factor = np.linalg.cholesky(covariance)
            z = scipy.linalg.solve_triangular(
                cholesky_factor, d.T, lower=True, check_finite=False,
                overwrite_b=True)
            squared_maha = np.sum(z * z, axis=0)
            return squared_maha
        else:
            raise ValueError('invalid distance metric')
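A minimal predict/update cycle for this filter, assuming it runs in the same module; the measurement values are made up for illustration:

import numpy as np

kf = KalmanFilter()
z0 = np.array([320., 240., 0.5, 100.])           # (x, y, aspect, height)
mean, cov = kf.initiate(z0)                      # 8-d state; velocities start at 0
mean, cov = kf.predict(mean, cov)                # constant-velocity prediction
z1 = np.array([324., 241., 0.5, 101.])
mean, cov = kf.update(mean, cov, z1)             # measurement correction
d2 = kf.gating_distance(mean, cov, z1[None, :])  # squared Mahalanobis; gate with chi2inv95[4]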
219  feeder/trackers/bytetrack/matching.py  Normal file
@@ -0,0 +1,219 @@
import cv2
import numpy as np
import scipy
import lap
from scipy.spatial.distance import cdist

from trackers.bytetrack import kalman_filter
import time


def merge_matches(m1, m2, shape):
    O, P, Q = shape
    m1 = np.asarray(m1)
    m2 = np.asarray(m2)

    M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
    M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))

    mask = M1 * M2
    match = mask.nonzero()
    match = list(zip(match[0], match[1]))
    unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
    unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))

    return match, unmatched_O, unmatched_Q


def _indices_to_matches(cost_matrix, indices, thresh):
    matched_cost = cost_matrix[tuple(zip(*indices))]
    matched_mask = (matched_cost <= thresh)

    matches = indices[matched_mask]
    unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0]))
    unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1]))

    return matches, unmatched_a, unmatched_b


def linear_assignment(cost_matrix, thresh):
    if cost_matrix.size == 0:
        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
    matches, unmatched_a, unmatched_b = [], [], []
    cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
    for ix, mx in enumerate(x):
        if mx >= 0:
            matches.append([ix, mx])
    unmatched_a = np.where(x < 0)[0]
    unmatched_b = np.where(y < 0)[0]
    matches = np.asarray(matches)
    return matches, unmatched_a, unmatched_b


def ious(atlbrs, btlbrs):
    """
    Compute cost based on IoU
    :type atlbrs: list[tlbr] | np.ndarray
    :type btlbrs: list[tlbr] | np.ndarray

    :rtype ious np.ndarray
    """
    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
    if ious.size == 0:
        return ious

    ious = bbox_ious(
        np.ascontiguousarray(atlbrs, dtype=np.float32),
        np.ascontiguousarray(btlbrs, dtype=np.float32)
    )

    return ious


def iou_distance(atracks, btracks):
    """
    Compute cost based on IoU
    :type atracks: list[STrack]
    :type btracks: list[STrack]

    :rtype cost_matrix np.ndarray
    """

    if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
        atlbrs = atracks
        btlbrs = btracks
    else:
        atlbrs = [track.tlbr for track in atracks]
        btlbrs = [track.tlbr for track in btracks]
    _ious = ious(atlbrs, btlbrs)
    cost_matrix = 1 - _ious

    return cost_matrix


def v_iou_distance(atracks, btracks):
    """
    Compute cost based on IoU
    :type atracks: list[STrack]
    :type btracks: list[STrack]

    :rtype cost_matrix np.ndarray
    """

    if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
        atlbrs = atracks
        btlbrs = btracks
    else:
        atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks]
        btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks]
    _ious = ious(atlbrs, btlbrs)
    cost_matrix = 1 - _ious

    return cost_matrix


def embedding_distance(tracks, detections, metric='cosine'):
    """
    :param tracks: list[STrack]
    :param detections: list[BaseTrack]
    :param metric:
    :return: cost_matrix np.ndarray
    """

    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32)
    # for i, track in enumerate(tracks):
    #     cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1, -1), det_features, metric))
    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32)
    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))  # Normalized features
    return cost_matrix


def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):
    if cost_matrix.size == 0:
        return cost_matrix
    gating_dim = 2 if only_position else 4
    gating_threshold = kalman_filter.chi2inv95[gating_dim]
    measurements = np.asarray([det.to_xyah() for det in detections])
    for row, track in enumerate(tracks):
        gating_distance = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position)
        cost_matrix[row, gating_distance > gating_threshold] = np.inf
    return cost_matrix


def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):
    if cost_matrix.size == 0:
        return cost_matrix
    gating_dim = 2 if only_position else 4
    gating_threshold = kalman_filter.chi2inv95[gating_dim]
    measurements = np.asarray([det.to_xyah() for det in detections])
    for row, track in enumerate(tracks):
        gating_distance = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position, metric='maha')
        cost_matrix[row, gating_distance > gating_threshold] = np.inf
        cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance
    return cost_matrix


def fuse_iou(cost_matrix, tracks, detections):
    if cost_matrix.size == 0:
        return cost_matrix
    reid_sim = 1 - cost_matrix
    iou_dist = iou_distance(tracks, detections)
    iou_sim = 1 - iou_dist
    fuse_sim = reid_sim * (1 + iou_sim) / 2
    det_scores = np.array([det.score for det in detections])
    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
    # fuse_sim = fuse_sim * (1 + det_scores) / 2
    fuse_cost = 1 - fuse_sim
    return fuse_cost


def fuse_score(cost_matrix, detections):
    if cost_matrix.size == 0:
        return cost_matrix
    iou_sim = 1 - cost_matrix
    det_scores = np.array([det.score for det in detections])
    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
    fuse_sim = iou_sim * det_scores
    fuse_cost = 1 - fuse_sim
    return fuse_cost


def bbox_ious(boxes, query_boxes):
    """
    Parameters
    ----------
    boxes: (N, 4) ndarray of float
    query_boxes: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=np.float32)

    for k in range(K):
        box_area = (
            (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
            (query_boxes[k, 3] - query_boxes[k, 1] + 1)
        )
        for n in range(N):
            iw = (
                min(boxes[n, 2], query_boxes[k, 2]) -
                max(boxes[n, 0], query_boxes[k, 0]) + 1
            )
            if iw > 0:
                ih = (
                    min(boxes[n, 3], query_boxes[k, 3]) -
                    max(boxes[n, 1], query_boxes[k, 1]) + 1
                )
                if ih > 0:
                    ua = float(
                        (boxes[n, 2] - boxes[n, 0] + 1) *
                        (boxes[n, 3] - boxes[n, 1] + 1) +
                        box_area - iw * ih
                    )
                    overlaps[n, k] = iw * ih / ua
    return overlaps
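Putting the two main pieces together: an IoU cost matrix fed into the LAP solver. A sketch assuming it runs in this module with lap installed; the boxes are made-up tlbr coordinates:

import numpy as np

tracks = np.array([[10., 10., 50., 50.], [60., 60., 90., 90.]], dtype=np.float32)
dets = np.array([[12., 11., 52., 49.]], dtype=np.float32)

cost = 1 - bbox_ious(tracks, dets)             # IoU distance, shape (2, 1)
matches, u_track, u_det = linear_assignment(cost, thresh=0.8)
print(matches)                                 # [[0 0]]: track 0 matched to detection 0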
2  feeder/trackers/deepocsort/__init__.py  Normal file
@@ -0,0 +1,2 @@
from . import args
from . import ocsort
110  feeder/trackers/deepocsort/args.py  Normal file
@@ -0,0 +1,110 @@
import argparse


def make_parser():
    parser = argparse.ArgumentParser("OC-SORT parameters")

    # distributed
    parser.add_argument("-b", "--batch-size", type=int, default=1, help="batch size")
    parser.add_argument("-d", "--devices", default=None, type=int, help="device for training")

    parser.add_argument("--local_rank", default=0, type=int, help="local rank for dist training")
    parser.add_argument("--num_machines", default=1, type=int, help="num of nodes for training")
    parser.add_argument("--machine_rank", default=0, type=int, help="node rank for multi-node training")

    parser.add_argument(
        "-f",
        "--exp_file",
        default=None,
        type=str,
        help="please input your experiment description file",
    )
    parser.add_argument(
        "--test",
        dest="test",
        default=False,
        action="store_true",
        help="Evaluating on test-dev set.",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    # det args
    parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
    parser.add_argument("--conf", default=0.1, type=float, help="test conf")
    parser.add_argument("--nms", default=0.7, type=float, help="test nms threshold")
    parser.add_argument("--tsize", default=[800, 1440], nargs="+", type=int, help="test img size")
    parser.add_argument("--seed", default=None, type=int, help="eval seed")

    # tracking args
    parser.add_argument("--track_thresh", type=float, default=0.6, help="detection confidence threshold")
    parser.add_argument(
        "--iou_thresh",
        type=float,
        default=0.3,
        help="the iou threshold in Sort for matching",
    )
    parser.add_argument("--min_hits", type=int, default=3, help="min hits to create track in SORT")
    parser.add_argument(
        "--inertia",
        type=float,
        default=0.2,
        help="the weight of VDC term in cost matrix",
    )
    parser.add_argument(
        "--deltat",
        type=int,
        default=3,
        help="time step difference to estimate direction",
    )
    parser.add_argument("--track_buffer", type=int, default=30, help="number of frames to keep lost tracks")
    parser.add_argument(
        "--match_thresh",
        type=float,
        default=0.9,
        help="matching threshold for tracking",
    )
    parser.add_argument(
        "--gt-type",
        type=str,
        default="_val_half",
        help="suffix to find the gt annotation",
    )
    parser.add_argument("--public", action="store_true", help="use public detection")
    parser.add_argument("--asso", default="iou", help="similarity function: iou/giou/diou/ciou/ctdis")

    # for kitti/bdd100k inference with public detections
    parser.add_argument(
        "--raw_results_path",
        type=str,
        default="exps/permatrack_kitti_test/",
        help="path to the raw tracking results from other trackers",
    )
    parser.add_argument("--out_path", type=str, help="path to save output results")
    parser.add_argument(
        "--hp",
        action="store_true",
        help="use head padding to add the missing objects during \
            initializing the tracks (offline).",
    )

    # for demo video
    parser.add_argument("--demo_type", default="image", help="demo type, e.g. image, video or webcam")
    parser.add_argument("--path", default="./videos/demo.mp4", help="path to images or video")
    parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id")
    parser.add_argument(
        "--save_result",
        action="store_true",
        help="whether to save the inference result of image/video",
    )
    parser.add_argument(
        "--device",
        default="gpu",
        type=str,
        help="device to run our model, can either be cpu or gpu",
    )
    return parser
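A quick way to exercise this parser; the argument values below are illustrative only:

parser = make_parser()
args = parser.parse_args(["--track_thresh", "0.6", "--iou_thresh", "0.3", "--asso", "giou"])
print(args.track_thresh, args.min_hits, args.asso)  # 0.6 3 giou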
445  feeder/trackers/deepocsort/association.py  Normal file
@@ -0,0 +1,445 @@
import os
import pdb

import numpy as np
from scipy.special import softmax


def iou_batch(bboxes1, bboxes2):
    """
    From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2]
    """
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
    yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
    xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
    yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
    w = np.maximum(0.0, xx2 - xx1)
    h = np.maximum(0.0, yy2 - yy1)
    wh = w * h
    o = wh / (
        (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
        + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])
        - wh
    )
    return o


def giou_batch(bboxes1, bboxes2):
    """
    :param bbox_p: predicted bbox(N,4)(x1,y1,x2,y2)
    :param bbox_g: ground-truth bbox(N,4)(x1,y1,x2,y2)
    :return:
    """
    # for details, see https://arxiv.org/pdf/1902.09630.pdf
    # ensure predict's bbox form
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
    yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
    xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
    yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
    w = np.maximum(0.0, xx2 - xx1)
    h = np.maximum(0.0, yy2 - yy1)
    wh = w * h
    iou = wh / (
        (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
        + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])
        - wh
    )

    xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0])
    yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1])
    xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2])
    yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3])
    wc = xxc2 - xxc1
    hc = yyc2 - yyc1
    assert (wc > 0).all() and (hc > 0).all()
    area_enclose = wc * hc
    giou = iou - (area_enclose - wh) / area_enclose
    giou = (giou + 1.0) / 2.0  # resize from (-1,1) to (0,1)
    return giou


def diou_batch(bboxes1, bboxes2):
    """
    :param bbox_p: predicted bbox(N,4)(x1,y1,x2,y2)
    :param bbox_g: ground-truth bbox(N,4)(x1,y1,x2,y2)
    :return:
    """
    # for details, see https://arxiv.org/pdf/1902.09630.pdf
    # ensure predict's bbox form
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    # calculate the intersection box
    xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
    yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
    xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
    yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
    w = np.maximum(0.0, xx2 - xx1)
    h = np.maximum(0.0, yy2 - yy1)
    wh = w * h
    iou = wh / (
        (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
        + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])
        - wh
    )

    centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0
    centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0
    centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0
    centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0

    inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2

    xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0])
    yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1])
    xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2])
    yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3])

    outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2
    diou = iou - inner_diag / outer_diag

    return (diou + 1) / 2.0  # resize from (-1,1) to (0,1)


def ciou_batch(bboxes1, bboxes2):
    """
    :param bbox_p: predicted bbox(N,4)(x1,y1,x2,y2)
    :param bbox_g: ground-truth bbox(N,4)(x1,y1,x2,y2)
    :return:
    """
    # for details, see https://arxiv.org/pdf/1902.09630.pdf
    # ensure predict's bbox form
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    # calculate the intersection box
    xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
    yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
    xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
    yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
    w = np.maximum(0.0, xx2 - xx1)
    h = np.maximum(0.0, yy2 - yy1)
    wh = w * h
    iou = wh / (
        (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
        + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])
        - wh
    )

    centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0
    centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0
    centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0
    centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0

    inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2

    xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0])
    yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1])
    xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2])
    yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3])

    outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2

    w1 = bboxes1[..., 2] - bboxes1[..., 0]
    h1 = bboxes1[..., 3] - bboxes1[..., 1]
    w2 = bboxes2[..., 2] - bboxes2[..., 0]
    h2 = bboxes2[..., 3] - bboxes2[..., 1]

    # prevent division by zero: add a one-pixel shift
    h2 = h2 + 1.0
    h1 = h1 + 1.0
    arctan = np.arctan(w2 / h2) - np.arctan(w1 / h1)
    v = (4 / (np.pi**2)) * (arctan**2)
    S = 1 - iou
    alpha = v / (S + v)
    ciou = iou - inner_diag / outer_diag - alpha * v

    return (ciou + 1) / 2.0  # resize from (-1,1) to (0,1)


def ct_dist(bboxes1, bboxes2):
    """
    Measure the center distance between two sets of bounding boxes.
    This is a coarse implementation; we don't recommend using it alone
    for association, as it can be unstable and sensitive to frame rate
    and object speed.
    """
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0
    centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0
    centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0
    centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0

    ct_dist2 = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2

    ct_dist = np.sqrt(ct_dist2)

    # The linear rescaling is a naive version and needs more study
    ct_dist = ct_dist / ct_dist.max()
    return ct_dist.max() - ct_dist  # resize to (0,1)


def speed_direction_batch(dets, tracks):
    tracks = tracks[..., np.newaxis]
    CX1, CY1 = (dets[:, 0] + dets[:, 2]) / 2.0, (dets[:, 1] + dets[:, 3]) / 2.0
    CX2, CY2 = (tracks[:, 0] + tracks[:, 2]) / 2.0, (tracks[:, 1] + tracks[:, 3]) / 2.0
    dx = CX1 - CX2
    dy = CY1 - CY2
    norm = np.sqrt(dx**2 + dy**2) + 1e-6
    dx = dx / norm
    dy = dy / norm
    return dy, dx  # size: num_track x num_det


def linear_assignment(cost_matrix):
    try:
        import lap

        _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
        return np.array([[y[i], i] for i in x if i >= 0])
    except ImportError:
        from scipy.optimize import linear_sum_assignment

        x, y = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(x, y)))


def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):
    """
    Assigns detections to tracked objects (both represented as bounding boxes).
    Returns 3 lists: matches, unmatched_detections and unmatched_trackers.
    """
    if len(trackers) == 0:
        return (
            np.empty((0, 2), dtype=int),
            np.arange(len(detections)),
            np.empty((0, 5), dtype=int),
        )

    iou_matrix = iou_batch(detections, trackers)

    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(-iou_matrix)
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in matched_indices[:, 0]:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in matched_indices[:, 1]:
            unmatched_trackers.append(t)

    # filter out matches with low IOU
    matches = []
    for m in matched_indices:
        if iou_matrix[m[0], m[1]] < iou_threshold:
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)


def compute_aw_max_metric(emb_cost, w_association_emb, bottom=0.5):
    w_emb = np.full_like(emb_cost, w_association_emb)

    for idx in range(emb_cost.shape[0]):
        inds = np.argsort(-emb_cost[idx])
        # If there are fewer than two matches, just keep the original weight
        if len(inds) < 2:
            continue
        if emb_cost[idx, inds[0]] == 0:
            row_weight = 0
        else:
            row_weight = 1 - max((emb_cost[idx, inds[1]] / emb_cost[idx, inds[0]]) - bottom, 0) / (1 - bottom)
        w_emb[idx] *= row_weight

    for idj in range(emb_cost.shape[1]):
        inds = np.argsort(-emb_cost[:, idj])
        # If there are fewer than two matches, just keep the original weight
        if len(inds) < 2:
            continue
        if emb_cost[inds[0], idj] == 0:
            col_weight = 0
        else:
            col_weight = 1 - max((emb_cost[inds[1], idj] / emb_cost[inds[0], idj]) - bottom, 0) / (1 - bottom)
        w_emb[:, idj] *= col_weight

    return w_emb * emb_cost


def associate(
    detections, trackers, iou_threshold, velocities, previous_obs, vdc_weight, emb_cost, w_assoc_emb, aw_off, aw_param
):
    if len(trackers) == 0:
        return (
            np.empty((0, 2), dtype=int),
            np.arange(len(detections)),
            np.empty((0, 5), dtype=int),
        )

    Y, X = speed_direction_batch(detections, previous_obs)
    inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1]
    inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1)
    inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1)
    diff_angle_cos = inertia_X * X + inertia_Y * Y
    diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1)
    diff_angle = np.arccos(diff_angle_cos)
    diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi

    valid_mask = np.ones(previous_obs.shape[0])
    valid_mask[np.where(previous_obs[:, 4] < 0)] = 0

    iou_matrix = iou_batch(detections, trackers)
    scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1)
    # iou_matrix = iou_matrix * scores  # a trick that sometimes works; we don't encourage it
    valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1)

    angle_diff_cost = (valid_mask * diff_angle) * vdc_weight
    angle_diff_cost = angle_diff_cost.T
    angle_diff_cost = angle_diff_cost * scores

    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            if emb_cost is None:
                emb_cost = 0
            else:
                emb_cost = emb_cost.cpu().numpy()
                emb_cost[iou_matrix <= 0] = 0
                if not aw_off:
                    emb_cost = compute_aw_max_metric(emb_cost, w_assoc_emb, bottom=aw_param)
                else:
                    emb_cost *= w_assoc_emb

            final_cost = -(iou_matrix + angle_diff_cost + emb_cost)
            matched_indices = linear_assignment(final_cost)
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in matched_indices[:, 0]:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in matched_indices[:, 1]:
            unmatched_trackers.append(t)

    # filter out matches with low IOU
    matches = []
    for m in matched_indices:
        if iou_matrix[m[0], m[1]] < iou_threshold:
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)


def associate_kitti(detections, trackers, det_cates, iou_threshold, velocities, previous_obs, vdc_weight):
    if len(trackers) == 0:
        return (
            np.empty((0, 2), dtype=int),
            np.arange(len(detections)),
            np.empty((0, 5), dtype=int),
        )

    """
        Cost from the velocity direction consistency
    """
    Y, X = speed_direction_batch(detections, previous_obs)
    inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1]
    inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1)
    inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1)
    diff_angle_cos = inertia_X * X + inertia_Y * Y
    diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1)
    diff_angle = np.arccos(diff_angle_cos)
    diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi

    valid_mask = np.ones(previous_obs.shape[0])
    valid_mask[np.where(previous_obs[:, 4] < 0)] = 0
    valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1)

    scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1)
    angle_diff_cost = (valid_mask * diff_angle) * vdc_weight
    angle_diff_cost = angle_diff_cost.T
    angle_diff_cost = angle_diff_cost * scores

    """
        Cost from IoU
    """
    iou_matrix = iou_batch(detections, trackers)

    """
        With multiple categories, generate the cost for category mismatch
    """
    num_dets = detections.shape[0]
    num_trk = trackers.shape[0]
    cate_matrix = np.zeros((num_dets, num_trk))
    for i in range(num_dets):
        for j in range(num_trk):
            if det_cates[i] != trackers[j, 4]:
                cate_matrix[i][j] = -1e6

    cost_matrix = -iou_matrix - angle_diff_cost - cate_matrix

    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(cost_matrix)
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in matched_indices[:, 0]:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in matched_indices[:, 1]:
            unmatched_trackers.append(t)

    # filter out matches with low IOU
    matches = []
    for m in matched_indices:
        if iou_matrix[m[0], m[1]] < iou_threshold:
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
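A small end-to-end check of the SORT-style association above, assuming it runs in this module; all boxes are made up:

import numpy as np

dets = np.array([[10., 10., 50., 50.], [200., 200., 240., 240.]])  # x1, y1, x2, y2
trks = np.array([[12., 12., 52., 52.], [400., 400., 440., 440.]])

m, u_det, u_trk = associate_detections_to_trackers(dets, trks, iou_threshold=0.3)
print(m)      # [[0 0]]: detection 0 matched to tracker 0
print(u_det)  # [1]: detection 1 has no tracker above the IoU threshold
print(u_trk)  # [1]: tracker 1 is left unmatched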
170  feeder/trackers/deepocsort/cmc.py  Normal file
@@ -0,0 +1,170 @@
import pdb
import pickle
import os

import cv2
import numpy as np


class CMCComputer:
    def __init__(self, minimum_features=10, method="sparse"):
        assert method in ["file", "sparse", "sift"]

        os.makedirs("./cache", exist_ok=True)
        self.cache_path = "./cache/affine_ocsort.pkl"
        self.cache = {}
        if os.path.exists(self.cache_path):
            with open(self.cache_path, "rb") as fp:
                self.cache = pickle.load(fp)
        self.minimum_features = minimum_features
        self.prev_img = None
        self.prev_desc = None
        self.sparse_flow_param = dict(
            maxCorners=3000,
            qualityLevel=0.01,
            minDistance=1,
            blockSize=3,
            useHarrisDetector=False,
            k=0.04,
        )
        self.file_computed = {}

        self.comp_function = None
        if method == "sparse":
            self.comp_function = self._affine_sparse_flow
        elif method == "sift":
            self.comp_function = self._affine_sift
        # Same BoT-SORT CMC arrays
        elif method == "file":
            self.comp_function = self._affine_file
            self.file_affines = {}
            # Maps from tag name to file name
            self.file_names = {}

            # All the ablation file names
            for f_name in os.listdir("./cache/cmc_files/MOT17_ablation/"):
                # The tag that'll be passed into compute_affine based on image name
                tag = f_name.replace("GMC-", "").replace(".txt", "") + "-FRCNN"
                f_name = os.path.join("./cache/cmc_files/MOT17_ablation/", f_name)
                self.file_names[tag] = f_name
            for f_name in os.listdir("./cache/cmc_files/MOT20_ablation/"):
                tag = f_name.replace("GMC-", "").replace(".txt", "")
                f_name = os.path.join("./cache/cmc_files/MOT20_ablation/", f_name)
                self.file_names[tag] = f_name

            # All the test file names
            for f_name in os.listdir("./cache/cmc_files/MOTChallenge/"):
                tag = f_name.replace("GMC-", "").replace(".txt", "")
                if "MOT17" in tag:
                    tag = tag + "-FRCNN"
                # If it's an ablation one (not test) don't overwrite it
                if tag in self.file_names:
                    continue
                f_name = os.path.join("./cache/cmc_files/MOTChallenge/", f_name)
                self.file_names[tag] = f_name

    def compute_affine(self, img, bbox, tag):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if tag in self.cache:
            A = self.cache[tag]
            return A
        mask = np.ones_like(img, dtype=np.uint8)
        if bbox.shape[0] > 0:
            bbox = np.round(bbox).astype(np.int32)
            bbox[bbox < 0] = 0
 | 
			
		||||
            for bb in bbox:
 | 
			
		||||
                mask[bb[1] : bb[3], bb[0] : bb[2]] = 0
 | 
			
		||||
 | 
			
		||||
        A = self.comp_function(img, mask, tag)
 | 
			
		||||
        self.cache[tag] = A
 | 
			
		||||
 | 
			
		||||
        return A
 | 
			
		||||
 | 
			
		||||
    def _load_file(self, name):
 | 
			
		||||
        affines = []
 | 
			
		||||
        with open(self.file_names[name], "r") as fp:
 | 
			
		||||
            for line in fp:
 | 
			
		||||
                tokens = [float(f) for f in line.split("\t")[1:7]]
 | 
			
		||||
                A = np.eye(2, 3)
 | 
			
		||||
                A[0, 0] = tokens[0]
 | 
			
		||||
                A[0, 1] = tokens[1]
 | 
			
		||||
                A[0, 2] = tokens[2]
 | 
			
		||||
                A[1, 0] = tokens[3]
 | 
			
		||||
                A[1, 1] = tokens[4]
 | 
			
		||||
                A[1, 2] = tokens[5]
 | 
			
		||||
                affines.append(A)
 | 
			
		||||
        self.file_affines[name] = affines
 | 
			
		||||
 | 
			
		||||
    def _affine_file(self, frame, mask, tag):
 | 
			
		||||
        name, num = tag.split(":")
 | 
			
		||||
        if name not in self.file_affines:
 | 
			
		||||
            self._load_file(name)
 | 
			
		||||
        if name not in self.file_affines:
 | 
			
		||||
            raise RuntimeError("Error loading file affines for CMC.")
 | 
			
		||||
 | 
			
		||||
        return self.file_affines[name][int(num) - 1]
 | 
			
		||||
 | 
			
		||||
    def _affine_sift(self, frame, mask, tag):
 | 
			
		||||
        A = np.eye(2, 3)
 | 
			
		||||
        detector = cv2.SIFT_create()
 | 
			
		||||
        kp, desc = detector.detectAndCompute(frame, mask)
 | 
			
		||||
        if self.prev_desc is None:
 | 
			
		||||
            self.prev_desc = [kp, desc]
 | 
			
		||||
            return A
 | 
			
		||||
        if desc.shape[0] < self.minimum_features or self.prev_desc[1].shape[0] < self.minimum_features:
 | 
			
		||||
            return A
 | 
			
		||||
 | 
			
		||||
        bf = cv2.BFMatcher(cv2.NORM_L2)
 | 
			
		||||
        matches = bf.knnMatch(self.prev_desc[1], desc, k=2)
 | 
			
		||||
        good = []
 | 
			
		||||
        for m, n in matches:
 | 
			
		||||
            if m.distance < 0.7 * n.distance:
 | 
			
		||||
                good.append(m)
 | 
			
		||||
 | 
			
		||||
        if len(good) > self.minimum_features:
 | 
			
		||||
            src_pts = np.float32([self.prev_desc[0][m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
 | 
			
		||||
            dst_pts = np.float32([kp[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
 | 
			
		||||
            A, _ = cv2.estimateAffinePartial2D(src_pts, dst_pts, method=cv2.RANSAC)
 | 
			
		||||
        else:
 | 
			
		||||
            print("Warning: not enough matching points")
 | 
			
		||||
        if A is None:
 | 
			
		||||
            A = np.eye(2, 3)
 | 
			
		||||
 | 
			
		||||
        self.prev_desc = [kp, desc]
 | 
			
		||||
        return A
 | 
			
		||||
 | 
			
		||||
    def _affine_sparse_flow(self, frame, mask, tag):
 | 
			
		||||
        # Initialize
 | 
			
		||||
        A = np.eye(2, 3)
 | 
			
		||||
 | 
			
		||||
        # find the keypoints
 | 
			
		||||
        keypoints = cv2.goodFeaturesToTrack(frame, mask=mask, **self.sparse_flow_param)
 | 
			
		||||
 | 
			
		||||
        # Handle first frame
 | 
			
		||||
        if self.prev_img is None:
 | 
			
		||||
            self.prev_img = frame
 | 
			
		||||
            self.prev_desc = keypoints
 | 
			
		||||
            return A
 | 
			
		||||
 | 
			
		||||
        matched_kp, status, err = cv2.calcOpticalFlowPyrLK(self.prev_img, frame, self.prev_desc, None)
 | 
			
		||||
        matched_kp = matched_kp.reshape(-1, 2)
 | 
			
		||||
        status = status.reshape(-1)
 | 
			
		||||
        prev_points = self.prev_desc.reshape(-1, 2)
 | 
			
		||||
        prev_points = prev_points[status]
 | 
			
		||||
        curr_points = matched_kp[status]
 | 
			
		||||
 | 
			
		||||
        # Find rigid matrix
 | 
			
		||||
        if prev_points.shape[0] > self.minimum_features:
 | 
			
		||||
            A, _ = cv2.estimateAffinePartial2D(prev_points, curr_points, method=cv2.RANSAC)
 | 
			
		||||
        else:
 | 
			
		||||
            print("Warning: not enough matching points")
 | 
			
		||||
        if A is None:
 | 
			
		||||
            A = np.eye(2, 3)
 | 
			
		||||
 | 
			
		||||
        self.prev_img = frame
 | 
			
		||||
        self.prev_desc = keypoints
 | 
			
		||||
        return A
 | 
			
		||||
 | 
			
		||||
    def dump_cache(self):
 | 
			
		||||
        with open(self.cache_path, "wb") as fp:
 | 
			
		||||
            pickle.dump(self.cache, fp)
 | 
			
		||||
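A quick usage sketch for CMCComputer, assuming two consecutive BGR frames and per-frame detection boxes; the file names, tag strings, and box values below are illustrative only:

import numpy as np
import cv2

cmc = CMCComputer(method="sparse")
frame0 = cv2.imread("frame_000001.jpg")  # placeholder path
frame1 = cv2.imread("frame_000002.jpg")  # placeholder path
boxes = np.array([[100, 120, 180, 300]], dtype=np.float32)  # x1, y1, x2, y2

A0 = cmc.compute_affine(frame0, boxes, tag="seq:1")  # identity on the first frame
A1 = cmc.compute_affine(frame1, boxes, tag="seq:2")  # 2x3 affine: frame0 -> frame1

# Warp a point from the previous frame into the current one
p = np.array([140.0, 210.0, 1.0])
p_warped = A1 @ p

cmc.dump_cache()  # persist ./cache/affine_ocsort.pkl for the next run

Boxes are masked out before feature extraction, so the affine is estimated from background motion only, which is what the tracker needs to compensate camera movement.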
							
								
								
									
12 feeder/trackers/deepocsort/configs/deepocsort.yaml Normal file
@@ -0,0 +1,12 @@
# Trial number:      137
# HOTA, MOTA, IDF1:  [55.567]
deepocsort:
  asso_func: giou
  conf_thres: 0.5122620708221085
  delta_t: 1
  det_thresh: 0
  inertia: 0.3941737016672115
  iou_thresh: 0.22136877277096445
  max_age: 50
  min_hits: 1
  use_byte: false
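A sketch of how these values might be consumed, assuming PyYAML; the mapping of yaml keys to OCSort constructor arguments (iou_thresh -> iou_threshold, and so on) is inferred from the parameter names in ocsort.py below and is not shown in this commit, and the ReID weights path is a placeholder:

import yaml
from pathlib import Path

with open("feeder/trackers/deepocsort/configs/deepocsort.yaml") as f:
    cfg = yaml.safe_load(f)["deepocsort"]

tracker = OCSort(
    model_weights=Path("osnet_x0_25_msmt17.pt"),  # illustrative ReID weights
    device="cuda:0",
    fp16=False,
    det_thresh=cfg["det_thresh"],
    max_age=cfg["max_age"],
    min_hits=cfg["min_hits"],
    iou_threshold=cfg["iou_thresh"],
    delta_t=cfg["delta_t"],
    asso_func=cfg["asso_func"],
    inertia=cfg["inertia"],
)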
							
								
								
									
116 feeder/trackers/deepocsort/embedding.py Normal file
@@ -0,0 +1,116 @@
import pdb
from collections import OrderedDict
import os
import pickle

import torch
import cv2
import torchvision
import numpy as np


class EmbeddingComputer:
    def __init__(self, dataset):
        self.model = None
        self.dataset = dataset
        self.crop_size = (128, 384)
        os.makedirs("./cache/embeddings/", exist_ok=True)
        self.cache_path = "./cache/embeddings/{}_embedding.pkl"
        self.cache = {}
        self.cache_name = ""

    def load_cache(self, path):
        self.cache_name = path
        cache_path = self.cache_path.format(path)
        if os.path.exists(cache_path):
            with open(cache_path, "rb") as fp:
                self.cache = pickle.load(fp)

    def compute_embedding(self, img, bbox, tag, is_numpy=True):
        # Tags look like "sequence:frame"; a new sequence prefix swaps the cache file
        if self.cache_name != tag.split(":")[0]:
            self.load_cache(tag.split(":")[0])

        if tag in self.cache:
            embs = self.cache[tag]
            if embs.shape[0] != bbox.shape[0]:
                raise RuntimeError(
                    "ERROR: The number of cached embeddings doesn't match the "
                    "number of detections.\nWas the detector model changed? Delete cache if so."
                )
            return embs

        if self.model is None:
            self.initialize_model()

        # Make sure bbox is within image frame
        if is_numpy:
            h, w = img.shape[:2]
        else:
            h, w = img.shape[2:]
        results = np.round(bbox).astype(np.int32)
        results[:, 0] = results[:, 0].clip(0, w)
        results[:, 1] = results[:, 1].clip(0, h)
        results[:, 2] = results[:, 2].clip(0, w)
        results[:, 3] = results[:, 3].clip(0, h)

        # Generate all the crops
        crops = []
        for p in results:
            if is_numpy:
                crop = img[p[1] : p[3], p[0] : p[2]]
                crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
                crop = cv2.resize(crop, self.crop_size, interpolation=cv2.INTER_LINEAR)
                crop = torch.as_tensor(crop.astype("float32").transpose(2, 0, 1))
                crop = crop.unsqueeze(0)
            else:
                crop = img[:, :, p[1] : p[3], p[0] : p[2]]
                crop = torchvision.transforms.functional.resize(crop, self.crop_size)

            crops.append(crop)

        crops = torch.cat(crops, dim=0)

        # Create embeddings and l2 normalize them
        with torch.no_grad():
            crops = crops.cuda()
            crops = crops.half()
            embs = self.model(crops)
        embs = torch.nn.functional.normalize(embs)
        embs = embs.cpu().numpy()

        self.cache[tag] = embs
        return embs

    def initialize_model(self):
        """
        model = torchreid.models.build_model(name="osnet_ain_x1_0", num_classes=2510, loss="softmax", pretrained=False)
        sd = torch.load("external/weights/osnet_ain_ms_d_c.pth.tar")["state_dict"]
        new_state_dict = OrderedDict()
        for k, v in sd.items():
            name = k[7:]  # remove `module.`
            new_state_dict[name] = v
        # load params
        model.load_state_dict(new_state_dict)
        model.eval()
        model.cuda()
        """
        if self.dataset == "mot17":
            path = "external/weights/mot17_sbs_S50.pth"
        elif self.dataset == "mot20":
            path = "external/weights/mot20_sbs_S50.pth"
        elif self.dataset == "dance":
            path = None
        else:
            raise RuntimeError("Need the path for a new ReID model.")

        # NOTE: FastReID is not imported in this file; it must be provided by the
        # surrounding package for this path to work.
        model = FastReID(path)
        model.eval()
        model.cuda()
        model.half()
        self.model = model

    def dump_cache(self):
        if self.cache_name:
            with open(self.cache_path.format(self.cache_name), "wb") as fp:
                pickle.dump(self.cache, fp)
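A minimal sketch of the cache-keyed lookup above, assuming tags of the form "sequence:frame"; the frame and boxes are placeholders, and an actual run requires CUDA plus the FastReID weights listed in initialize_model:

import numpy as np

computer = EmbeddingComputer(dataset="mot17")
img = np.zeros((1080, 1920, 3), dtype=np.uint8)              # placeholder frame
boxes = np.array([[100, 120, 180, 300]], dtype=np.float32)   # x1, y1, x2, y2

# First call loads ./cache/embeddings/MOT17-02_embedding.pkl if present,
# computes embeddings for uncached tags, and stores them under the tag.
embs = computer.compute_embedding(img, boxes, tag="MOT17-02:1")
assert embs.shape[0] == boxes.shape[0]

computer.dump_cache()  # persist embeddings keyed by tag for the next run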
							
								
								
									
1636 feeder/trackers/deepocsort/kalmanfilter.py Normal file
File diff suppressed because it is too large.
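The kalmanfilter.py diff is suppressed above, but the call sites in ocsort.py below pin down the surface KalmanFilterNew must expose on top of the usual filterpy-style predict/update. A sketch of that implied interface, inferred from usage rather than from the suppressed 1636-line file itself:

# Interface implied by ocsort.py's call sites; a sketch, not the suppressed implementation.
class KalmanFilterNew:
    def __init__(self, dim_x, dim_z):
        ...  # constant-velocity state of size dim_x, measurements of size dim_z

    def predict(self, Q=None):
        ...  # advance the state; Q overrides the process noise for this step

    def update(self, z, R=None):
        ...  # z may be None on missed frames; R overrides the measurement noise

    def apply_affine_correction(self, m, t, new_kf):
        ...  # warp state/history by a 2x2 rotation-scale m and translation t (CMC)

    def md_for_measurement(self, z):
        ...  # Mahalanobis distance from the predicted measurement to z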
											
										
									
								
							
							
								
								
									
670 feeder/trackers/deepocsort/ocsort.py Normal file
@@ -0,0 +1,670 @@
"""
    This script is adapted from the SORT script by Alex Bewley alex@bewley.ai
"""
from __future__ import print_function

import pdb
import pickle

import cv2
import torch
import torchvision

import numpy as np
from .association import *
from .embedding import EmbeddingComputer
from .cmc import CMCComputer
from reid_multibackend import ReIDDetectMultiBackend


def k_previous_obs(observations, cur_age, k):
    if len(observations) == 0:
        return [-1, -1, -1, -1, -1]
    for i in range(k):
        dt = k - i
        if cur_age - dt in observations:
            return observations[cur_age - dt]
    max_age = max(observations.keys())
    return observations[max_age]


def convert_bbox_to_z(bbox):
    """
    Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
      [x,y,s,r] where x,y is the centre of the box, s is the scale/area and r is
      the aspect ratio
    """
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    x = bbox[0] + w / 2.0
    y = bbox[1] + h / 2.0
    s = w * h  # scale is just area
    r = w / float(h + 1e-6)
    return np.array([x, y, s, r]).reshape((4, 1))


def convert_bbox_to_z_new(bbox):
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    x = bbox[0] + w / 2.0
    y = bbox[1] + h / 2.0
    return np.array([x, y, w, h]).reshape((4, 1))


def convert_x_to_bbox_new(x):
    x, y, w, h = x.reshape(-1)[:4]
    return np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2]).reshape(1, 4)


def convert_x_to_bbox(x, score=None):
    """
    Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
      [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
    """
    w = np.sqrt(x[2] * x[3])
    h = x[2] / w
    if score is None:
        return np.array([x[0] - w / 2.0, x[1] - h / 2.0, x[0] + w / 2.0, x[1] + h / 2.0]).reshape((1, 4))
    else:
        return np.array([x[0] - w / 2.0, x[1] - h / 2.0, x[0] + w / 2.0, x[1] + h / 2.0, score]).reshape((1, 5))


def speed_direction(bbox1, bbox2):
    cx1, cy1 = (bbox1[0] + bbox1[2]) / 2.0, (bbox1[1] + bbox1[3]) / 2.0
    cx2, cy2 = (bbox2[0] + bbox2[2]) / 2.0, (bbox2[1] + bbox2[3]) / 2.0
    speed = np.array([cy2 - cy1, cx2 - cx1])
    norm = np.sqrt((cy2 - cy1) ** 2 + (cx2 - cx1) ** 2) + 1e-6
    return speed / norm
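A worked round trip through the two [x,y,s,r] conversions above, using an illustrative 80x180 box; the small atol absorbs the 1e-6 guard in the aspect-ratio division:

import numpy as np

bbox = np.array([100.0, 120.0, 180.0, 300.0])  # x1, y1, x2, y2: an 80x180 box
z = convert_bbox_to_z(bbox)                    # centre (140, 210), s=14400, r~=0.444
recovered = convert_x_to_bbox(z)               # back to [[100, 120, 180, 300]]
assert np.allclose(recovered, bbox.reshape(1, 4), atol=1e-3)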
def new_kf_process_noise(w, h, p=1 / 20, v=1 / 160):
    Q = np.diag(
        ((p * w) ** 2, (p * h) ** 2, (p * w) ** 2, (p * h) ** 2, (v * w) ** 2, (v * h) ** 2, (v * w) ** 2, (v * h) ** 2)
    )
    return Q


def new_kf_measurement_noise(w, h, m=1 / 20):
    w_var = (m * w) ** 2
    h_var = (m * h) ** 2
    R = np.diag((w_var, h_var, w_var, h_var))
    return R
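These noise matrices scale with box size rather than being fixed, so large boxes tolerate proportionally larger residuals. A quick numeric check with the defaults p=1/20, v=1/160, m=1/20:

import numpy as np

w, h = 100.0, 200.0
Q = new_kf_process_noise(w, h)       # 8x8 diagonal
R = new_kf_measurement_noise(w, h)   # 4x4 diagonal

print(np.diag(Q)[:4])  # position terms: [25., 100., 25., 100.]  ((w/20)^2, (h/20)^2, ...)
print(np.diag(Q)[4:])  # velocity terms: [0.390625, 1.5625, 0.390625, 1.5625]
print(np.diag(R))      # measurement:    [25., 100., 25., 100.]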
class KalmanBoxTracker(object):
    """
    This class represents the internal state of an individual tracked object observed as a bbox.
    """

    count = 0

    def __init__(self, bbox, cls, delta_t=3, orig=False, emb=None, alpha=0, new_kf=False):
        """
        Initialises a tracker using an initial bounding box.
        """
        # define constant velocity model
        if not orig:
            from .kalmanfilter import KalmanFilterNew as KalmanFilter
        else:
            from filterpy.kalman import KalmanFilter
        self.cls = cls
        self.conf = bbox[-1]
        self.new_kf = new_kf
        if new_kf:
            self.kf = KalmanFilter(dim_x=8, dim_z=4)
            self.kf.F = np.array(
                [
                    # x y w h x' y' w' h'
                    [1, 0, 0, 0, 1, 0, 0, 0],
                    [0, 1, 0, 0, 0, 1, 0, 0],
                    [0, 0, 1, 0, 0, 0, 1, 0],
                    [0, 0, 0, 1, 0, 0, 0, 1],
                    [0, 0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1],
                ]
            )
            self.kf.H = np.array(
                [
                    [1, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0, 0],
                ]
            )
            _, _, w, h = convert_bbox_to_z_new(bbox).reshape(-1)
            self.kf.P = new_kf_process_noise(w, h)
            self.kf.P[:4, :4] *= 4
            self.kf.P[4:, 4:] *= 100
            # Process and measurement uncertainty happen in functions
            self.bbox_to_z_func = convert_bbox_to_z_new
            self.x_to_bbox_func = convert_x_to_bbox_new
        else:
            self.kf = KalmanFilter(dim_x=7, dim_z=4)
            self.kf.F = np.array(
                [
                    # x  y  s  r  x' y' s'
                    [1, 0, 0, 0, 1, 0, 0],
                    [0, 1, 0, 0, 0, 1, 0],
                    [0, 0, 1, 0, 0, 0, 1],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 0, 0, 1],
                ]
            )
            self.kf.H = np.array(
                [
                    [1, 0, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                ]
            )
            self.kf.R[2:, 2:] *= 10.0
            self.kf.P[4:, 4:] *= 1000.0  # give high uncertainty to the unobservable initial velocities
            self.kf.P *= 10.0
            self.kf.Q[-1, -1] *= 0.01
            self.kf.Q[4:, 4:] *= 0.01
            self.bbox_to_z_func = convert_bbox_to_z
            self.x_to_bbox_func = convert_x_to_bbox

        self.kf.x[:4] = self.bbox_to_z_func(bbox)

        self.time_since_update = 0
        self.id = KalmanBoxTracker.count
        KalmanBoxTracker.count += 1
        self.history = []
        self.hits = 0
        self.hit_streak = 0
        self.age = 0
        """
        NOTE: [-1,-1,-1,-1,-1] is a compromise placeholder for non-observation status, and the same
        placeholder is returned by k_previous_obs. It is ugly, but it lets the observation array below
        (k_observations = np.array([k_previous_obs(...)])) be generated in a fast and unified way,
        so let's bear with it for now.
        """
        # Used for OCR
        self.last_observation = np.array([-1, -1, -1, -1, -1])  # placeholder
        # Used to output track after min_hits reached
        self.history_observations = []
        # Used for velocity
        self.observations = dict()
        self.velocity = None
        self.delta_t = delta_t

        self.emb = emb

        self.frozen = False
    def update(self, bbox, cls):
        """
        Updates the state vector with an observed bbox.
        """
        if bbox is not None:
            self.frozen = False
            self.cls = cls
            if self.last_observation.sum() >= 0:  # a previous observation exists
                previous_box = None
                for dt in range(self.delta_t, 0, -1):
                    if self.age - dt in self.observations:
                        previous_box = self.observations[self.age - dt]
                        break
                if previous_box is None:
                    previous_box = self.last_observation
                """
                  Estimate the track speed direction with observations \Delta t steps away
                """
                self.velocity = speed_direction(previous_box, bbox)
            """
              Insert new observations. This is an ugly way to maintain both self.observations
              and self.history_observations. Bear with it for the moment.
            """
            self.last_observation = bbox
            self.observations[self.age] = bbox
            self.history_observations.append(bbox)

            self.time_since_update = 0
            self.history = []
            self.hits += 1
            self.hit_streak += 1
            if self.new_kf:
                R = new_kf_measurement_noise(self.kf.x[2, 0], self.kf.x[3, 0])
                self.kf.update(self.bbox_to_z_func(bbox), R=R)
            else:
                self.kf.update(self.bbox_to_z_func(bbox))
        else:
            self.kf.update(bbox)
            self.frozen = True

    def update_emb(self, emb, alpha=0.9):
        self.emb = alpha * self.emb + (1 - alpha) * emb
        self.emb /= np.linalg.norm(self.emb)

    def get_emb(self):
        return self.emb.cpu()

    def apply_affine_correction(self, affine):
        m = affine[:, :2]
        t = affine[:, 2].reshape(2, 1)
        # For OCR
        if self.last_observation.sum() > 0:
            ps = self.last_observation[:4].reshape(2, 2).T
            ps = m @ ps + t
            self.last_observation[:4] = ps.T.reshape(-1)

        # Apply to each box in the range of velocity computation
        for dt in range(self.delta_t, -1, -1):
            if self.age - dt in self.observations:
                ps = self.observations[self.age - dt][:4].reshape(2, 2).T
                ps = m @ ps + t
                self.observations[self.age - dt][:4] = ps.T.reshape(-1)

        # Also need to change the kf state, which might be frozen
        self.kf.apply_affine_correction(m, t, self.new_kf)

    def predict(self):
        """
        Advances the state vector and returns the predicted bounding box estimate.
        """
        # Don't allow negative bounding boxes
        if self.new_kf:
            if self.kf.x[2] + self.kf.x[6] <= 0:
                self.kf.x[6] = 0
            if self.kf.x[3] + self.kf.x[7] <= 0:
                self.kf.x[7] = 0

            # Stop velocity, will update in kf during OOS
            if self.frozen:
                self.kf.x[6] = self.kf.x[7] = 0
            Q = new_kf_process_noise(self.kf.x[2, 0], self.kf.x[3, 0])
        else:
            if (self.kf.x[6] + self.kf.x[2]) <= 0:
                self.kf.x[6] *= 0.0
            Q = None

        self.kf.predict(Q=Q)
        self.age += 1
        if self.time_since_update > 0:
            self.hit_streak = 0
        self.time_since_update += 1
        self.history.append(self.x_to_bbox_func(self.kf.x))
        return self.history[-1]

    def get_state(self):
        """
        Returns the current bounding box estimate.
        """
        return self.x_to_bbox_func(self.kf.x)

    def mahalanobis(self, bbox):
        """Should be run after a predict() call for accuracy."""
        return self.kf.md_for_measurement(self.bbox_to_z_func(bbox))
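A minimal predict/update cycle for KalmanBoxTracker, assuming it runs inside this package so the relative kalmanfilter import resolves; the boxes are illustrative. Note that update(None, None) marks a missed frame and freezes the velocity until the next hit:

import numpy as np

det0 = np.array([100.0, 120.0, 180.0, 300.0, 0.9])  # x1, y1, x2, y2, score
trk = KalmanBoxTracker(det0, cls=0, delta_t=3, new_kf=True)

pred = trk.predict()                                 # predicted box for the next frame
det1 = np.array([104.0, 122.0, 184.0, 302.0, 0.85])
trk.update(det1, cls=0)                              # hit: refresh state and observations

trk.predict()
trk.update(None, None)                               # miss: freeze, time_since_update grows
print(trk.get_state(), trk.time_since_update)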
"""
 | 
			
		||||
    We support multiple ways for association cost calculation, by default
 | 
			
		||||
    we use IoU. GIoU may have better performance in some situations. We note 
 | 
			
		||||
    that we hardly normalize the cost by all methods to (0,1) which may not be 
 | 
			
		||||
    the best practice.
 | 
			
		||||
"""
 | 
			
		||||
ASSO_FUNCS = {
 | 
			
		||||
    "iou": iou_batch,
 | 
			
		||||
    "giou": giou_batch,
 | 
			
		||||
    "ciou": ciou_batch,
 | 
			
		||||
    "diou": diou_batch,
 | 
			
		||||
    "ct_dist": ct_dist,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class OCSort(object):
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        model_weights,
 | 
			
		||||
        device,
 | 
			
		||||
        fp16,
 | 
			
		||||
        det_thresh,
 | 
			
		||||
        max_age=30,
 | 
			
		||||
        min_hits=3,
 | 
			
		||||
        iou_threshold=0.3,
 | 
			
		||||
        delta_t=3,
 | 
			
		||||
        asso_func="iou",
 | 
			
		||||
        inertia=0.2,
 | 
			
		||||
        w_association_emb=0.75,
 | 
			
		||||
        alpha_fixed_emb=0.95,
 | 
			
		||||
        aw_param=0.5,
 | 
			
		||||
        embedding_off=False,
 | 
			
		||||
        cmc_off=False,
 | 
			
		||||
        aw_off=False,
 | 
			
		||||
        new_kf_off=False,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    ):
 | 
			
		||||
        """
 | 
			
		||||
        Sets key parameters for SORT
 | 
			
		||||
        """
 | 
			
		||||
        self.max_age = max_age
 | 
			
		||||
        self.min_hits = min_hits
 | 
			
		||||
        self.iou_threshold = iou_threshold
 | 
			
		||||
        self.trackers = []
 | 
			
		||||
        self.frame_count = 0
 | 
			
		||||
        self.det_thresh = det_thresh
 | 
			
		||||
        self.delta_t = delta_t
 | 
			
		||||
        self.asso_func = ASSO_FUNCS[asso_func]
 | 
			
		||||
        self.inertia = inertia
 | 
			
		||||
        self.w_association_emb = w_association_emb
 | 
			
		||||
        self.alpha_fixed_emb = alpha_fixed_emb
 | 
			
		||||
        self.aw_param = aw_param
 | 
			
		||||
        KalmanBoxTracker.count = 0
 | 
			
		||||
 | 
			
		||||
        self.embedder = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16)
 | 
			
		||||
        self.cmc = CMCComputer()
 | 
			
		||||
        self.embedding_off = embedding_off
 | 
			
		||||
        self.cmc_off = cmc_off
 | 
			
		||||
        self.aw_off = aw_off
 | 
			
		||||
        self.new_kf_off = new_kf_off
 | 
			
		||||
 | 
			
		||||
    def update(self, dets, img_numpy, tag='blub'):
        """
        Params:
          dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
        Requires: this method must be called once for each frame, even with empty detections (use np.empty((0, 5)) for frames without detections).
        Returns a similar array, where the last column is the object ID.
        NOTE: The number of objects returned may differ from the number of detections provided.
        """
        self.frame_count += 1  # needed so the min_hits warm-up check below can expire

        xyxys = dets[:, 0:4]
        scores = dets[:, 4]
        clss = dets[:, 5]

        classes = clss.numpy()
        xyxys = xyxys.numpy()
        scores = scores.numpy()

        dets = dets[:, 0:6].numpy()
        remain_inds = scores > self.det_thresh
        dets = dets[remain_inds]
        self.height, self.width = img_numpy.shape[:2]

        # Rescale
        #scale = min(img_tensor.shape[2] / img_numpy.shape[0], img_tensor.shape[3] / img_numpy.shape[1])
        #dets[:, :4] /= scale

        # Embedding
        if self.embedding_off or dets.shape[0] == 0:
            dets_embs = np.ones((dets.shape[0], 1))
        else:
            # (Ndets x X) [512, 1024, 2048]
            #dets_embs = self.embedder.compute_embedding(img_numpy, dets[:, :4], tag)
            # NOTE: _get_features interprets boxes as (cx, cy, w, h) via _xywh_to_xyxy,
            # while dets here are (x1, y1, x2, y2); the crops will be offset unless converted first.
            dets_embs = self._get_features(dets[:, :4], img_numpy)

        # CMC
        if not self.cmc_off:
            transform = self.cmc.compute_affine(img_numpy, dets[:, :4], tag)
            for trk in self.trackers:
                trk.apply_affine_correction(transform)

        trust = (dets[:, 4] - self.det_thresh) / (1 - self.det_thresh)
        af = self.alpha_fixed_emb
        # From [self.alpha_fixed_emb, 1]; goes to 1 as the detector gets less confident
        dets_alpha = af + (1 - af) * (1 - trust)

        # get predicted locations from existing trackers.
        trks = np.zeros((len(self.trackers), 5))
        trk_embs = []
        to_del = []
        ret = []
        for t, trk in enumerate(trks):
            pos = self.trackers[t].predict()[0]
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
            if np.any(np.isnan(pos)):
                to_del.append(t)
            else:
                trk_embs.append(self.trackers[t].get_emb())
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))

        if len(trk_embs) > 0:
            trk_embs = np.vstack(trk_embs)
        else:
            trk_embs = np.array(trk_embs)

        for t in reversed(to_del):
            self.trackers.pop(t)

        velocities = np.array([trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers])
        last_boxes = np.array([trk.last_observation for trk in self.trackers])
        k_observations = np.array([k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers])

        """
            First round of association
        """
        # (M detections X N tracks, final score)
        if self.embedding_off or dets.shape[0] == 0 or trk_embs.shape[0] == 0:
            stage1_emb_cost = None
        else:
            stage1_emb_cost = dets_embs @ trk_embs.T
        matched, unmatched_dets, unmatched_trks = associate(
            dets,
            trks,
            self.iou_threshold,
            velocities,
            k_observations,
            self.inertia,
            stage1_emb_cost,
            self.w_association_emb,
            self.aw_off,
            self.aw_param,
        )
        for m in matched:
            self.trackers[m[1]].update(dets[m[0], :5], dets[m[0], 5])
            self.trackers[m[1]].update_emb(dets_embs[m[0]], alpha=dets_alpha[m[0]])

        """
            Second round of association by OCR
        """
        if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0:
            left_dets = dets[unmatched_dets]
            left_dets_embs = dets_embs[unmatched_dets]
            left_trks = last_boxes[unmatched_trks]
            left_trks_embs = trk_embs[unmatched_trks]

            iou_left = self.asso_func(left_dets, left_trks)
            # TODO: is better without this
            emb_cost_left = left_dets_embs @ left_trks_embs.T
            if self.embedding_off:
                emb_cost_left = np.zeros_like(emb_cost_left)
            iou_left = np.array(iou_left)
            if iou_left.max() > self.iou_threshold:
                """
                NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may
                get higher performance, especially on the MOT17/MOT20 datasets. But we keep it
                uniform here for simplicity
                """
                rematched_indices = linear_assignment(-iou_left)
                to_remove_det_indices = []
                to_remove_trk_indices = []
                for m in rematched_indices:
                    det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]]
                    if iou_left[m[0], m[1]] < self.iou_threshold:
                        continue
                    self.trackers[trk_ind].update(dets[det_ind, :5], dets[det_ind, 5])
                    self.trackers[trk_ind].update_emb(dets_embs[det_ind], alpha=dets_alpha[det_ind])
                    to_remove_det_indices.append(det_ind)
                    to_remove_trk_indices.append(trk_ind)
                unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices))
                unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices))

        for m in unmatched_trks:
            self.trackers[m].update(None, None)

        # create and initialise new trackers for unmatched detections
        for i in unmatched_dets:
            trk = KalmanBoxTracker(
                dets[i, :5], dets[i, 5], delta_t=self.delta_t, emb=dets_embs[i], alpha=dets_alpha[i], new_kf=not self.new_kf_off
            )
            self.trackers.append(trk)
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            if trk.last_observation.sum() < 0:
                d = trk.get_state()[0]
            else:
                """
                Using either the recent observation or the Kalman filter prediction is
                optional here; we didn't notice a significant difference
                """
                d = trk.last_observation[:4]
            if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                # +1 as MOT benchmark requires positive
                ret.append(np.concatenate((d, [trk.id + 1], [trk.cls], [trk.conf])).reshape(1, -1))
            i -= 1
            # remove dead tracklet
            if trk.time_since_update > self.max_age:
                self.trackers.pop(i)
        if len(ret) > 0:
            return np.concatenate(ret)
        return np.empty((0, 5))
    def _xywh_to_xyxy(self, bbox_xywh):
        x, y, w, h = bbox_xywh
        x1 = max(int(x - w / 2), 0)
        x2 = min(int(x + w / 2), self.width - 1)
        y1 = max(int(y - h / 2), 0)
        y2 = min(int(y + h / 2), self.height - 1)
        return x1, y1, x2, y2

    def _get_features(self, bbox_xywh, ori_img):
        im_crops = []
        for box in bbox_xywh:
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            im_crops.append(im)
        if im_crops:
            features = self.embedder(im_crops).cpu()
        else:
            features = np.array([])

        return features
    def update_public(self, dets, cates, scores):
        self.frame_count += 1

        det_scores = np.ones((dets.shape[0], 1))
        dets = np.concatenate((dets, det_scores), axis=1)

        remain_inds = scores > self.det_thresh

        cates = cates[remain_inds]
        dets = dets[remain_inds]

        trks = np.zeros((len(self.trackers), 5))
        to_del = []
        ret = []
        for t, trk in enumerate(trks):
            pos = self.trackers[t].predict()[0]
            cat = self.trackers[t].cate
            trk[:] = [pos[0], pos[1], pos[2], pos[3], cat]
            if np.any(np.isnan(pos)):
                to_del.append(t)
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        for t in reversed(to_del):
            self.trackers.pop(t)

        velocities = np.array([trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers])
        last_boxes = np.array([trk.last_observation for trk in self.trackers])
        k_observations = np.array([k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers])

        matched, unmatched_dets, unmatched_trks = associate_kitti(
            dets,
            trks,
            cates,
            self.iou_threshold,
            velocities,
            k_observations,
            self.inertia,
        )

        for m in matched:
            self.trackers[m[1]].update(dets[m[0], :])

        if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0:
            """
            The re-association stage by OCR.
            NOTE: at this stage, adding other strategies might further improve
            performance, such as BYTE association from ByteTrack.
            """
            left_dets = dets[unmatched_dets]
            left_trks = last_boxes[unmatched_trks]
            left_dets_c = left_dets.copy()
            left_trks_c = left_trks.copy()

            iou_left = self.asso_func(left_dets_c, left_trks_c)
            iou_left = np.array(iou_left)
            det_cates_left = cates[unmatched_dets]
            trk_cates_left = trks[unmatched_trks][:, 4]
            num_dets = unmatched_dets.shape[0]
            num_trks = unmatched_trks.shape[0]
            cate_matrix = np.zeros((num_dets, num_trks))
            for i in range(num_dets):
                for j in range(num_trks):
                    if det_cates_left[i] != trk_cates_left[j]:
                        """
                        For datasets with multiple categories, such as KITTI,
                        we have to avoid associating boxes of different categories.
                        """
                        cate_matrix[i][j] = -1e6
            iou_left = iou_left + cate_matrix
            if iou_left.max() > self.iou_threshold - 0.1:
                rematched_indices = linear_assignment(-iou_left)
                to_remove_det_indices = []
                to_remove_trk_indices = []
                for m in rematched_indices:
                    det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]]
                    if iou_left[m[0], m[1]] < self.iou_threshold - 0.1:
                        continue
                    self.trackers[trk_ind].update(dets[det_ind, :])
                    to_remove_det_indices.append(det_ind)
                    to_remove_trk_indices.append(trk_ind)
                unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices))
                unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices))

        for i in unmatched_dets:
            # NOTE: legacy KITTI path; KalmanBoxTracker is called here without the cls
            # argument required by the newer signature above.
            trk = KalmanBoxTracker(dets[i, :])
            trk.cate = cates[i]
            self.trackers.append(trk)
        i = len(self.trackers)

        for trk in reversed(self.trackers):
            if trk.last_observation.sum() > 0:
                d = trk.last_observation[:4]
            else:
                d = trk.get_state()[0]
            if trk.time_since_update < 1:
                if (self.frame_count <= self.min_hits) or (trk.hit_streak >= self.min_hits):
                    # id+1 as MOT benchmark requires positive
                    ret.append(np.concatenate((d, [trk.id + 1], [trk.cls], [trk.conf])).reshape(1, -1))
                if trk.hit_streak == self.min_hits:
                    # Head Padding (HP): recover the lost steps during initializing the track
                    for prev_i in range(self.min_hits - 1):
                        prev_observation = trk.history_observations[-(prev_i + 2)]
                        ret.append(
                            (
                                np.concatenate(
                                    (
                                        prev_observation[:4],
                                        [trk.id + 1],
                                        [trk.cls],
                                        [trk.conf],
                                    )
                                )
                            ).reshape(1, -1)
                        )
            i -= 1
            if trk.time_since_update > self.max_age:
                self.trackers.pop(i)

        if len(ret) > 0:
            return np.concatenate(ret)
        return np.empty((0, 7))

    def dump_cache(self):
        self.cmc.dump_cache()
        self.embedder.dump_cache()
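An end-to-end sketch of driving OCSort.update once per frame, assuming the feeder package imports resolve, a CUDA device, and torch-tensor detections in [x1, y1, x2, y2, score, class] as the method expects; the weights path and frame are placeholders:

import numpy as np
import torch
from pathlib import Path

tracker = OCSort(
    model_weights=Path("osnet_x0_25_msmt17.pt"),  # placeholder ReID weights
    device=torch.device("cuda:0"),
    fp16=False,
    det_thresh=0.5,
)

frame = np.zeros((1080, 1920, 3), dtype=np.uint8)   # placeholder BGR frame
dets = torch.tensor([[100.0, 120.0, 180.0, 300.0, 0.9, 0.0]])

# One call per frame, even when there are no detections
out = tracker.update(dets, frame)
for x1, y1, x2, y2, track_id, cls, conf in out:
    print(int(track_id), (x1, y1, x2, y2), conf)

Each output row is [x1, y1, x2, y2, id, class, confidence], with the id offset by +1 for MOT-style evaluation.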
							
								
								
									
237 feeder/trackers/deepocsort/reid_multibackend.py Normal file
@@ -0,0 +1,237 @@
 | 
			
		|||
import torch.nn as nn
 | 
			
		||||
import torch
 | 
			
		||||
from pathlib import Path
 | 
			
		||||
import numpy as np
 | 
			
		||||
from itertools import islice
 | 
			
		||||
import torchvision.transforms as transforms
 | 
			
		||||
import cv2
 | 
			
		||||
import sys
 | 
			
		||||
import torchvision.transforms as T
 | 
			
		||||
from collections import OrderedDict, namedtuple
 | 
			
		||||
import gdown
 | 
			
		||||
from os.path import exists as file_exists
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
from yolov8.ultralytics.yolo.utils.checks import check_requirements, check_version
 | 
			
		||||
from yolov8.ultralytics.yolo.utils import LOGGER
 | 
			
		||||
from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name,
 | 
			
		||||
                                                          download_url, load_pretrained_weights)
 | 
			
		||||
from trackers.strongsort.deep.models import build_model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
 | 
			
		||||
    # Check file(s) for acceptable suffix
 | 
			
		||||
    if file and suffix:
 | 
			
		||||
        if isinstance(suffix, str):
 | 
			
		||||
            suffix = [suffix]
 | 
			
		||||
        for f in file if isinstance(file, (list, tuple)) else [file]:
 | 
			
		||||
            s = Path(f).suffix.lower()  # file suffix
 | 
			
		||||
            if len(s):
 | 
			
		||||
                assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ReIDDetectMultiBackend(nn.Module):
    # ReID models MultiBackend class for Python inference on various backends
    def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False):
        super().__init__()

        w = weights[0] if isinstance(weights, list) else weights
        self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w)  # get backend
        self.fp16 = fp16
        self.fp16 &= self.pt or self.jit or self.engine  # FP16 only supported on PyTorch, TorchScript and TensorRT

        # Build transform functions
        self.device = device
        self.image_size = (256, 128)
        self.pixel_mean = [0.485, 0.456, 0.406]
        self.pixel_std = [0.229, 0.224, 0.225]
        self.transforms = []
        self.transforms += [T.Resize(self.image_size)]
        self.transforms += [T.ToTensor()]
        self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)]
        self.preprocess = T.Compose(self.transforms)
        self.to_pil = T.ToPILImage()

        model_name = get_model_name(w)

        if w.suffix == '.pt':
            model_url = get_model_url(w)
            if not file_exists(w) and model_url is not None:
                gdown.download(model_url, str(w), quiet=False)
            elif file_exists(w):
                pass
            else:
                print(f'No URL associated with the chosen StrongSORT weights ({w}). Choose between:')
                show_downloadeable_models()
                exit()

        # Build model
        self.model = build_model(
            model_name,
            num_classes=1,
            pretrained=not (w and w.is_file()),
            use_gpu=device
        )

        if self.pt:  # PyTorch
            # populate model arch with weights
            if w and w.is_file() and w.suffix == '.pt':
                load_pretrained_weights(self.model, w)
            self.model.to(device).eval()
            self.model.half() if self.fp16 else self.model.float()
        elif self.jit:  # TorchScript
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            self.model = torch.jit.load(w)
            self.model.half() if self.fp16 else self.model.float()
        elif self.onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            cuda = torch.cuda.is_available() and device.type != 'cpu'
            # check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
            self.session = onnxruntime.InferenceSession(str(w), providers=providers)
        elif self.engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
            if device.type == 'cpu':
                device = torch.device('cuda:0')
            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                self.model_ = runtime.deserialize_cuda_engine(f.read())
            self.context = self.model_.create_execution_context()
            self.bindings = OrderedDict()
            self.fp16 = False  # default, updated below
            self.dynamic = False  # stored on self: forward() reads it for dynamic-shape engines
            for index in range(self.model_.num_bindings):
                name = self.model_.get_binding_name(index)
                dtype = trt.nptype(self.model_.get_binding_dtype(index))
                if self.model_.binding_is_input(index):
                    if -1 in tuple(self.model_.get_binding_shape(index)):  # dynamic input
                        self.dynamic = True
                        self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2]))
                    if dtype == np.float16:
                        self.fp16 = True
                shape = tuple(self.context.get_binding_shape(index))
                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
                self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
            self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items())
            batch_size = self.bindings['images'].shape[0]  # if dynamic, this is instead the max batch size
        elif self.xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements(('openvino',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
            from openvino.runtime import Core, Layout, get_batch
            ie = Core()
            if not Path(w).is_file():  # if not *.xml
                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
            if network.get_parameters()[0].get_layout().empty:
                network.get_parameters()[0].set_layout(Layout("NCWH"))
            batch_dim = get_batch(network)
            if batch_dim.is_static:
                batch_size = batch_dim.get_length()
            self.executable_network = ie.compile_model(network, device_name="CPU")  # device_name="MYRIAD" for Intel NCS2
            self.output_layer = next(iter(self.executable_network.outputs))
        elif self.tflite:  # TensorFlow Lite
            LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
                from tflite_runtime.interpreter import Interpreter, load_delegate
            except ImportError:
                import tensorflow as tf
                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
            self.interpreter = Interpreter(model_path=str(w))  # use the imported name so the tflite_runtime path also works
            self.interpreter.allocate_tensors()
            # Get input and output tensors
            self.input_details = self.interpreter.get_input_details()
            self.output_details = self.interpreter.get_output_details()

            # Test model on random input data
            input_data = np.array(np.random.random_sample((1, 256, 128, 3)), dtype=np.float32)
            self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
            self.interpreter.invoke()

            # get_tensor() returns a copy of the tensor data
            output_data = self.interpreter.get_tensor(self.output_details[0]['index'])
        else:
            print('This model framework is not supported yet!')
            exit()

    @staticmethod
    def model_type(p='path/to/model.pt'):
        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
        from trackers.reid_export import export_formats
        sf = list(export_formats().Suffix)  # export suffixes
        check_suffix(p, sf)  # checks
        types = [s in Path(p).name for s in sf]
        return types

    def _preprocess(self, im_batch):
        images = []
        for element in im_batch:
            image = self.to_pil(element)
            image = self.preprocess(image)
            images.append(image)

        images = torch.stack(images, dim=0)
        images = images.to(self.device)

        return images

    def forward(self, im_batch):
        # preprocess batch
        im_batch = self._preprocess(im_batch)

        # batch to half
        if self.fp16 and im_batch.dtype != torch.float16:
            im_batch = im_batch.half()

        # batch processing
        features = []
        if self.pt:  # PyTorch
            features = self.model(im_batch)
        elif self.jit:  # TorchScript
            features = self.model(im_batch)
        elif self.onnx:  # ONNX Runtime
            im_batch = im_batch.cpu().numpy()  # torch to numpy
            features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0]
        elif self.engine:  # TensorRT
            if self.dynamic and im_batch.shape != self.bindings['images'].shape:
                i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output'))
                self.context.set_binding_shape(i_in, im_batch.shape)  # reshape if dynamic
                self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape)
                self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out)))
            s = self.bindings['images'].shape
            assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
            self.binding_addrs['images'] = int(im_batch.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            features = self.bindings['output'].data
        elif self.xml:  # OpenVINO
            im_batch = im_batch.cpu().numpy()  # FP32
            features = self.executable_network([im_batch])[self.output_layer]
        else:
            print('Framework not supported at the moment, we are working on it...')
            exit()

        if isinstance(features, (list, tuple)):
            return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features]
        else:
            return self.from_numpy(features)

    def from_numpy(self, x):
        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x

    def warmup(self, imgsz=[(256, 128, 3)]):
        # Warmup model by running inference once
        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.tflite
        if any(warmup_types) and self.device.type != 'cpu':
            im = [np.empty(*imgsz).astype(np.uint8)]  # dummy input crop
            for _ in range(2 if self.jit else 1):
                self.forward(im)  # warmup
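A minimal usage sketch for the backend above (illustrative, not part of the commit: the weights filename is the constructor default and `crops` stands for a list of HxWx3 uint8 person crops; note the constructor inspects `weights.suffix`, so a pathlib.Path is expected):

    import numpy as np
    import torch
    from pathlib import Path

    reid = ReIDDetectMultiBackend(weights=Path('osnet_x0_25_msmt17.pt'),
                                  device=torch.device('cpu'), fp16=False)
    reid.warmup()  # no-op on CPU; runs one dummy forward pass on GPU
    crops = [np.zeros((128, 64, 3), dtype=np.uint8)]  # hypothetical detection crops
    features = reid(crops)  # (N, embedding_dim) appearance embeddings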
84	feeder/trackers/multi_tracker_zoo.py	Normal file
@@ -0,0 +1,84 @@
from trackers.strongsort.utils.parser import get_config


def create_tracker(tracker_type, tracker_config, reid_weights, device, half):

    cfg = get_config()
    cfg.merge_from_file(tracker_config)

    if tracker_type == 'strongsort':
        from trackers.strongsort.strong_sort import StrongSORT
        strongsort = StrongSORT(
            reid_weights,
            device,
            half,
            max_dist=cfg.strongsort.max_dist,
            max_iou_dist=cfg.strongsort.max_iou_dist,
            max_age=cfg.strongsort.max_age,
            max_unmatched_preds=cfg.strongsort.max_unmatched_preds,
            n_init=cfg.strongsort.n_init,
            nn_budget=cfg.strongsort.nn_budget,
            mc_lambda=cfg.strongsort.mc_lambda,
            ema_alpha=cfg.strongsort.ema_alpha,
        )
        return strongsort

    elif tracker_type == 'ocsort':
        from trackers.ocsort.ocsort import OCSort
        ocsort = OCSort(
            det_thresh=cfg.ocsort.det_thresh,
            max_age=cfg.ocsort.max_age,
            min_hits=cfg.ocsort.min_hits,
            iou_threshold=cfg.ocsort.iou_thresh,
            delta_t=cfg.ocsort.delta_t,
            asso_func=cfg.ocsort.asso_func,
            inertia=cfg.ocsort.inertia,
            use_byte=cfg.ocsort.use_byte,
        )
        return ocsort

    elif tracker_type == 'bytetrack':
        from trackers.bytetrack.byte_tracker import BYTETracker
        bytetracker = BYTETracker(
            track_thresh=cfg.bytetrack.track_thresh,
            match_thresh=cfg.bytetrack.match_thresh,
            track_buffer=cfg.bytetrack.track_buffer,
            frame_rate=cfg.bytetrack.frame_rate
        )
        return bytetracker

    elif tracker_type == 'botsort':
        from trackers.botsort.bot_sort import BoTSORT
        botsort = BoTSORT(
            reid_weights,
            device,
            half,
            track_high_thresh=cfg.botsort.track_high_thresh,
            new_track_thresh=cfg.botsort.new_track_thresh,
            track_buffer=cfg.botsort.track_buffer,
            match_thresh=cfg.botsort.match_thresh,
            proximity_thresh=cfg.botsort.proximity_thresh,
            appearance_thresh=cfg.botsort.appearance_thresh,
            cmc_method=cfg.botsort.cmc_method,
            frame_rate=cfg.botsort.frame_rate,
            lambda_=cfg.botsort.lambda_
        )
        return botsort

    elif tracker_type == 'deepocsort':
        from trackers.deepocsort.ocsort import OCSort
        deepocsort = OCSort(  # renamed from 'botsort' to match the tracker actually built
            reid_weights,
            device,
            half,
            det_thresh=cfg.deepocsort.det_thresh,
            max_age=cfg.deepocsort.max_age,
            min_hits=cfg.deepocsort.min_hits,
            iou_threshold=cfg.deepocsort.iou_thresh,
            delta_t=cfg.deepocsort.delta_t,
            asso_func=cfg.deepocsort.asso_func,
            inertia=cfg.deepocsort.inertia,
        )
        return deepocsort

    else:
        print('No such tracker')
        exit()
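A hedged usage sketch for create_tracker (the config path mirrors the ocsort.yaml added below; reid_weights is unused by the motion-only OCSort but required by the appearance-based StrongSORT/BoTSORT/DeepOCSORT):

    import torch
    from trackers.multi_tracker_zoo import create_tracker

    tracker = create_tracker(
        tracker_type='ocsort',
        tracker_config='trackers/ocsort/configs/ocsort.yaml',
        reid_weights=None,
        device=torch.device('cpu'),
        half=False,
    )
    # per frame: outputs = tracker.update(dets, frame), where dets holds
    # one detection per row as [x1, y1, x2, y2, conf, cls]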
377	feeder/trackers/ocsort/association.py	Normal file
@@ -0,0 +1,377 @@
import os
import numpy as np


def iou_batch(bboxes1, bboxes2):
    """
    From SORT: Computes IoU between two sets of bboxes in the form [x1,y1,x2,y2]
    """
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
    yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
    xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
    yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
    w = np.maximum(0., xx2 - xx1)
    h = np.maximum(0., yy2 - yy1)
    wh = w * h
    o = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
        + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh)
    return o
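# Worked example (illustrative, not in the original file): two unit squares
# overlapping by half an area unit give IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
#   a = np.array([[0., 0., 1., 1.]])
#   b = np.array([[0.5, 0., 1.5, 1.]])
#   iou_batch(a, b)  # -> array([[0.333...]]), shape (len(a), len(b))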

def giou_batch(bboxes1, bboxes2):
    """
    :param bbox_p: predicted bbox (N,4) (x1,y1,x2,y2)
    :param bbox_g: ground-truth bbox (N,4) (x1,y1,x2,y2)
    :return:
    """
    # for details see https://arxiv.org/pdf/1902.09630.pdf
    # ensure the predicted bbox form
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
    yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
    xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
    yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
    w = np.maximum(0., xx2 - xx1)
    h = np.maximum(0., yy2 - yy1)
    wh = w * h
    iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
        + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh)

    xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0])
    yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1])
    xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2])
    yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3])
    wc = xxc2 - xxc1
    hc = yyc2 - yyc1
    assert((wc > 0).all() and (hc > 0).all())
    area_enclose = wc * hc
    giou = iou - (area_enclose - wh) / area_enclose
    giou = (giou + 1.) / 2.0  # rescale from (-1,1) to (0,1)
    return giou


def diou_batch(bboxes1, bboxes2):
    """
    :param bbox_p: predicted bbox (N,4) (x1,y1,x2,y2)
    :param bbox_g: ground-truth bbox (N,4) (x1,y1,x2,y2)
    :return:
    """
    # for details see https://arxiv.org/pdf/1902.09630.pdf
    # ensure the predicted bbox form
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    # calculate the intersection box
    xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
    yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
    xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
    yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
    w = np.maximum(0., xx2 - xx1)
    h = np.maximum(0., yy2 - yy1)
    wh = w * h
    iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
        + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh)

    centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0
    centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0
    centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0
    centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0

    inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2

    xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0])
    yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1])
    xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2])
    yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3])

    outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2
    diou = iou - inner_diag / outer_diag

    return (diou + 1) / 2.0  # rescale from (-1,1) to (0,1)


def ciou_batch(bboxes1, bboxes2):
    """
    :param bbox_p: predicted bbox (N,4) (x1,y1,x2,y2)
    :param bbox_g: ground-truth bbox (N,4) (x1,y1,x2,y2)
    :return:
    """
    # for details see https://arxiv.org/pdf/1902.09630.pdf
    # ensure the predicted bbox form
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    # calculate the intersection box
    xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
    yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
    xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
    yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
    w = np.maximum(0., xx2 - xx1)
    h = np.maximum(0., yy2 - yy1)
    wh = w * h
    iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
        + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh)

    centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0
    centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0
    centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0
    centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0

    inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2

    xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0])
    yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1])
    xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2])
    yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3])

    outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2

    w1 = bboxes1[..., 2] - bboxes1[..., 0]
    h1 = bboxes1[..., 3] - bboxes1[..., 1]
    w2 = bboxes2[..., 2] - bboxes2[..., 0]
    h2 = bboxes2[..., 3] - bboxes2[..., 1]

    # prevent division by zero: add a one-pixel shift
    h2 = h2 + 1.
    h1 = h1 + 1.
    arctan = np.arctan(w2 / h2) - np.arctan(w1 / h1)
    v = (4 / (np.pi ** 2)) * (arctan ** 2)
    S = 1 - iou
    alpha = v / (S + v)
    ciou = iou - inner_diag / outer_diag - alpha * v

    return (ciou + 1) / 2.0  # rescale from (-1,1) to (0,1)


def ct_dist(bboxes1, bboxes2):
    """
        Measure the center distance between two sets of bounding boxes.
        This is a coarse implementation; we don't recommend using it alone
        for association, as it can be unstable and sensitive to frame rate
        and object speed.
    """
    bboxes2 = np.expand_dims(bboxes2, 0)
    bboxes1 = np.expand_dims(bboxes1, 1)

    centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0
    centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0
    centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0
    centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0

    ct_dist2 = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2

    ct_dist = np.sqrt(ct_dist2)

    # The linear rescaling is a naive version and needs more study
    ct_dist = ct_dist / ct_dist.max()
    return ct_dist.max() - ct_dist  # rescale to (0,1)


def speed_direction_batch(dets, tracks):
    tracks = tracks[..., np.newaxis]
    CX1, CY1 = (dets[:, 0] + dets[:, 2]) / 2.0, (dets[:, 1] + dets[:, 3]) / 2.0
    CX2, CY2 = (tracks[:, 0] + tracks[:, 2]) / 2.0, (tracks[:, 1] + tracks[:, 3]) / 2.0
    dx = CX1 - CX2
    dy = CY1 - CY2
    norm = np.sqrt(dx**2 + dy**2) + 1e-6
    dx = dx / norm
    dy = dy / norm
    return dy, dx  # size: num_track x num_det
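# Shape note (illustrative, not in the original file): with N detections and
# M previous observations, dy and dx broadcast to (M, N); each entry is one
# component of the unit vector from a track's previous box center toward a
# detection center.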

def linear_assignment(cost_matrix):
    try:
        import lap
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
        return np.array([[y[i], i] for i in x if i >= 0])
    except ImportError:
        from scipy.optimize import linear_sum_assignment
        x, y = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(x, y)))


def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):
    """
    Assigns detections to tracked objects (both represented as bounding boxes).
    Returns 3 lists: matches, unmatched_detections and unmatched_trackers.
    """
    if len(trackers) == 0:
        return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)

    iou_matrix = iou_batch(detections, trackers)

    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(-iou_matrix)
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in matched_indices[:, 0]:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in matched_indices[:, 1]:
            unmatched_trackers.append(t)

    # filter out matches with low IoU
    matches = []
    for m in matched_indices:
        if iou_matrix[m[0], m[1]] < iou_threshold:
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)


def associate(detections, trackers, iou_threshold, velocities, previous_obs, vdc_weight):
    if len(trackers) == 0:
        return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)

    Y, X = speed_direction_batch(detections, previous_obs)
    inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1]
    inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1)
    inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1)
    diff_angle_cos = inertia_X * X + inertia_Y * Y
    diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1)
    diff_angle = np.arccos(diff_angle_cos)
    diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi

    valid_mask = np.ones(previous_obs.shape[0])
    valid_mask[np.where(previous_obs[:, 4] < 0)] = 0

    iou_matrix = iou_batch(detections, trackers)
    scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1)
    # iou_matrix = iou_matrix * scores  # a trick that sometimes works; we don't encourage it
    valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1)

    angle_diff_cost = (valid_mask * diff_angle) * vdc_weight
    angle_diff_cost = angle_diff_cost.T
    angle_diff_cost = angle_diff_cost * scores

    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(-(iou_matrix + angle_diff_cost))
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in matched_indices[:, 0]:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in matched_indices[:, 1]:
            unmatched_trackers.append(t)

    # filter out matches with low IoU
    matches = []
    for m in matched_indices:
        if iou_matrix[m[0], m[1]] < iou_threshold:
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)


def associate_kitti(detections, trackers, det_cates, iou_threshold,
        velocities, previous_obs, vdc_weight):
    if len(trackers) == 0:
        return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)

    """
        Cost from the velocity direction consistency
    """
    Y, X = speed_direction_batch(detections, previous_obs)
    inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1]
    inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1)
    inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1)
    diff_angle_cos = inertia_X * X + inertia_Y * Y
    diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1)
    diff_angle = np.arccos(diff_angle_cos)
    diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi

    valid_mask = np.ones(previous_obs.shape[0])
    valid_mask[np.where(previous_obs[:, 4] < 0)] = 0
    valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1)

    scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1)
    angle_diff_cost = (valid_mask * diff_angle) * vdc_weight
    angle_diff_cost = angle_diff_cost.T
    angle_diff_cost = angle_diff_cost * scores

    """
        Cost from IoU
    """
    iou_matrix = iou_batch(detections, trackers)

    """
        With multiple categories, generate the cost for category mismatch
    """
    num_dets = detections.shape[0]
    num_trk = trackers.shape[0]
    cate_matrix = np.zeros((num_dets, num_trk))
    for i in range(num_dets):
        for j in range(num_trk):
            if det_cates[i] != trackers[j, 4]:
                cate_matrix[i][j] = -1e6

    cost_matrix = -iou_matrix - angle_diff_cost - cate_matrix

    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(cost_matrix)
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in matched_indices[:, 0]:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in matched_indices[:, 1]:
            unmatched_trackers.append(t)

    # filter out matches with low IoU
    matches = []
    for m in matched_indices:
        if iou_matrix[m[0], m[1]] < iou_threshold:
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
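A quick sanity check of the association helpers above (a sketch; linear_assignment silently falls back to SciPy when `lap` is not installed):

    import numpy as np
    dets = np.array([[0., 0., 1., 1.], [5., 5., 6., 6.]])
    trks = np.array([[0.1, 0., 1.1, 1.], [5., 5.2, 6., 6.2]])
    m, ud, ut = associate_detections_to_trackers(dets, trks, iou_threshold=0.3)
    # m -> [[0, 0], [1, 1]]; ud and ut are empty since every box found a partner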
12	feeder/trackers/ocsort/configs/ocsort.yaml	Normal file
@@ -0,0 +1,12 @@
# Trial number:      137
# HOTA, MOTA, IDF1:  [55.567]
ocsort:
  asso_func: giou
  conf_thres: 0.5122620708221085
  delta_t: 1
  det_thresh: 0
  inertia: 0.3941737016672115
  iou_thresh: 0.22136877277096445
  max_age: 50
  min_hits: 1
  use_byte: false
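A small sketch of how this config is consumed (it mirrors create_tracker above; attribute access assumes the repo's get_config parser):

    from trackers.strongsort.utils.parser import get_config

    cfg = get_config()
    cfg.merge_from_file('trackers/ocsort/configs/ocsort.yaml')
    print(cfg.ocsort.asso_func, cfg.ocsort.max_age)  # giou 50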
1581	feeder/trackers/ocsort/kalmanfilter.py	Normal file
(File diff suppressed because it is too large)
328	feeder/trackers/ocsort/ocsort.py	Normal file
@@ -0,0 +1,328 @@
"""
 | 
			
		||||
    This script is adopted from the SORT script by Alex Bewley alex@bewley.ai
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import print_function
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
from .association import *
 | 
			
		||||
from ultralytics.yolo.utils.ops import xywh2xyxy
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def k_previous_obs(observations, cur_age, k):
 | 
			
		||||
    if len(observations) == 0:
 | 
			
		||||
        return [-1, -1, -1, -1, -1]
 | 
			
		||||
    for i in range(k):
 | 
			
		||||
        dt = k - i
 | 
			
		||||
        if cur_age - dt in observations:
 | 
			
		||||
            return observations[cur_age-dt]
 | 
			
		||||
    max_age = max(observations.keys())
 | 
			
		||||
    return observations[max_age]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_bbox_to_z(bbox):
 | 
			
		||||
    """
 | 
			
		||||
    Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
 | 
			
		||||
      [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
 | 
			
		||||
      the aspect ratio
 | 
			
		||||
    """
 | 
			
		||||
    w = bbox[2] - bbox[0]
 | 
			
		||||
    h = bbox[3] - bbox[1]
 | 
			
		||||
    x = bbox[0] + w/2.
 | 
			
		||||
    y = bbox[1] + h/2.
 | 
			
		||||
    s = w * h  # scale is just area
 | 
			
		||||
    r = w / float(h+1e-6)
 | 
			
		||||
    return np.array([x, y, s, r]).reshape((4, 1))
 | 
			
		||||
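# Worked example (illustrative, not in the original file): bbox [0, 0, 2, 1]
# has w=2, h=1, so z = [x=1, y=0.5, s=2 (area), r~2 (aspect ratio)];
# convert_x_to_bbox below inverts this mapping back to corner form.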

def convert_x_to_bbox(x, score=None):
    """
    Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
      [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
    """
    w = np.sqrt(x[2] * x[3])
    h = x[2] / w
    if score is None:
        return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2.]).reshape((1, 4))
    else:
        return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2., score]).reshape((1, 5))


def speed_direction(bbox1, bbox2):
    cx1, cy1 = (bbox1[0]+bbox1[2]) / 2.0, (bbox1[1]+bbox1[3]) / 2.0
    cx2, cy2 = (bbox2[0]+bbox2[2]) / 2.0, (bbox2[1]+bbox2[3]) / 2.0
    speed = np.array([cy2-cy1, cx2-cx1])
    norm = np.sqrt((cy2-cy1)**2 + (cx2-cx1)**2) + 1e-6
    return speed / norm


class KalmanBoxTracker(object):
    """
    This class represents the internal state of an individual tracked object, observed as a bbox.
    """
    count = 0

    def __init__(self, bbox, cls, delta_t=3, orig=False):
        """
        Initialises a tracker using an initial bounding box.
        """
        # define constant velocity model
        if not orig:
            from .kalmanfilter import KalmanFilterNew as KalmanFilter
            self.kf = KalmanFilter(dim_x=7, dim_z=4)
        else:
            from filterpy.kalman import KalmanFilter
            self.kf = KalmanFilter(dim_x=7, dim_z=4)
        self.kf.F = np.array([[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1],
                              [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])
        self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0],
                              [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]])

        self.kf.R[2:, 2:] *= 10.
        self.kf.P[4:, 4:] *= 1000.  # give high uncertainty to the unobservable initial velocities
        self.kf.P *= 10.
        self.kf.Q[-1, -1] *= 0.01
        self.kf.Q[4:, 4:] *= 0.01

        self.kf.x[:4] = convert_bbox_to_z(bbox)
        self.time_since_update = 0
        self.id = KalmanBoxTracker.count
        KalmanBoxTracker.count += 1
        self.history = []
        self.hits = 0
        self.hit_streak = 0
        self.age = 0
        self.conf = bbox[-1]
        self.cls = cls
        """
        NOTE: [-1,-1,-1,-1,-1] is a compromise placeholder for the non-observation status, and the same
        goes for the return value of k_previous_obs. It is ugly and I do not like it. But to generate the
        observation array in a fast and unified way (see k_observations = np.array([k_previous_obs(...)])
        below), let's bear it for now.
        """
        self.last_observation = np.array([-1, -1, -1, -1, -1])  # placeholder
        self.observations = dict()
        self.history_observations = []
        self.velocity = None
        self.delta_t = delta_t

    def update(self, bbox, cls):
        """
        Updates the state vector with an observed bbox.
        """
        if bbox is not None:
            self.conf = bbox[-1]
            self.cls = cls
            if self.last_observation.sum() >= 0:  # a previous observation exists
                previous_box = None
                for i in range(self.delta_t):
                    dt = self.delta_t - i
                    if self.age - dt in self.observations:
                        previous_box = self.observations[self.age-dt]
                        break
                if previous_box is None:
                    previous_box = self.last_observation
                """
                  Estimate the track speed direction with observations \Delta t steps away
                """
                self.velocity = speed_direction(previous_box, bbox)

            """
              Insert new observations. This is an ugly way to maintain both self.observations
              and self.history_observations. Bear with it for the moment.
            """
            self.last_observation = bbox
            self.observations[self.age] = bbox
            self.history_observations.append(bbox)

            self.time_since_update = 0
            self.history = []
            self.hits += 1
            self.hit_streak += 1
            self.kf.update(convert_bbox_to_z(bbox))
        else:
            self.kf.update(bbox)

    def predict(self):
        """
        Advances the state vector and returns the predicted bounding box estimate.
        """
        if (self.kf.x[6] + self.kf.x[2]) <= 0:
            self.kf.x[6] *= 0.0

        self.kf.predict()
        self.age += 1
        if self.time_since_update > 0:
            self.hit_streak = 0
        self.time_since_update += 1
        self.history.append(convert_x_to_bbox(self.kf.x))
        return self.history[-1]

    def get_state(self):
        """
        Returns the current bounding box estimate.
        """
        return convert_x_to_bbox(self.kf.x)
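# State layout note (illustrative, not in the original file): kf.x = [x, y, s, r,
# dx, dy, ds] -- box center, area and aspect ratio plus their velocities; the
# aspect ratio r is modeled as constant, hence dim_x=7 with no dr term in F.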

"""
    We support multiple ways of association cost calculation; by default
    we use IoU. GIoU may have better performance in some situations. Note
    that we hardly normalize the cost of all methods to (0,1), which may not be
    the best practice.
"""
ASSO_FUNCS = {  "iou": iou_batch,
                "giou": giou_batch,
                "ciou": ciou_batch,
                "diou": diou_batch,
                "ct_dist": ct_dist}


class OCSort(object):
    def __init__(self, det_thresh, max_age=30, min_hits=3,
        iou_threshold=0.3, delta_t=3, asso_func="iou", inertia=0.2, use_byte=False):
        """
        Sets key parameters for SORT
        """
        self.max_age = max_age
        self.min_hits = min_hits
        self.iou_threshold = iou_threshold
        self.trackers = []
        self.frame_count = 0
        self.det_thresh = det_thresh
        self.delta_t = delta_t
        self.asso_func = ASSO_FUNCS[asso_func]
        self.inertia = inertia
        self.use_byte = use_byte
        KalmanBoxTracker.count = 0

    def update(self, dets, _):
        """
        Params:
          dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
            (as used below, dets arrives as a torch tensor with a sixth class column and is converted via .numpy())
        Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
        Returns a similar array, where the last column is the object ID.
        NOTE: The number of objects returned may differ from the number of detections provided.
        """
        self.frame_count += 1

        xyxys = dets[:, 0:4]
        confs = dets[:, 4]
        clss = dets[:, 5]

        classes = clss.numpy()
        xyxys = xyxys.numpy()
        confs = confs.numpy()

        output_results = np.column_stack((xyxys, confs, classes))

        inds_low = confs > 0.1
        inds_high = confs < self.det_thresh
        inds_second = np.logical_and(inds_low, inds_high)  # self.det_thresh > score > 0.1, for second matching
        dets_second = output_results[inds_second]  # detections for second matching
        remain_inds = confs > self.det_thresh
        dets = output_results[remain_inds]

        # get predicted locations from existing trackers
        trks = np.zeros((len(self.trackers), 5))
        to_del = []
        ret = []
        for t, trk in enumerate(trks):
            pos = self.trackers[t].predict()[0]
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
            if np.any(np.isnan(pos)):
                to_del.append(t)
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        for t in reversed(to_del):
            self.trackers.pop(t)

        velocities = np.array(
            [trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers])
        last_boxes = np.array([trk.last_observation for trk in self.trackers])
        k_observations = np.array(
            [k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers])

        """
            First round of association
        """
        matched, unmatched_dets, unmatched_trks = associate(
            dets, trks, self.iou_threshold, velocities, k_observations, self.inertia)
        for m in matched:
            self.trackers[m[1]].update(dets[m[0], :5], dets[m[0], 5])

        """
            Second round of association by OCR
        """
        # BYTE association
        if self.use_byte and len(dets_second) > 0 and unmatched_trks.shape[0] > 0:
            u_trks = trks[unmatched_trks]
            iou_left = self.asso_func(dets_second, u_trks)  # iou between low-score detections and unmatched tracks
            iou_left = np.array(iou_left)
            if iou_left.max() > self.iou_threshold:
                """
                    NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may
                    get higher performance, especially on the MOT17/MOT20 datasets. But we keep
                    it uniform here for simplicity
                """
                matched_indices = linear_assignment(-iou_left)
                to_remove_trk_indices = []
                for m in matched_indices:
                    det_ind, trk_ind = m[0], unmatched_trks[m[1]]
                    if iou_left[m[0], m[1]] < self.iou_threshold:
                        continue
                    self.trackers[trk_ind].update(dets_second[det_ind, :5], dets_second[det_ind, 5])
                    to_remove_trk_indices.append(trk_ind)
                unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices))

        if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0:
            left_dets = dets[unmatched_dets]
            left_trks = last_boxes[unmatched_trks]
            iou_left = self.asso_func(left_dets, left_trks)
            iou_left = np.array(iou_left)
            if iou_left.max() > self.iou_threshold:
                """
                    NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may
                    get higher performance, especially on the MOT17/MOT20 datasets. But we keep
                    it uniform here for simplicity
                """
                rematched_indices = linear_assignment(-iou_left)
                to_remove_det_indices = []
                to_remove_trk_indices = []
                for m in rematched_indices:
                    det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]]
                    if iou_left[m[0], m[1]] < self.iou_threshold:
                        continue
                    self.trackers[trk_ind].update(dets[det_ind, :5], dets[det_ind, 5])
                    to_remove_det_indices.append(det_ind)
                    to_remove_trk_indices.append(trk_ind)
                unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices))
                unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices))

        for m in unmatched_trks:
            self.trackers[m].update(None, None)

        # create and initialise new trackers for unmatched detections
        for i in unmatched_dets:
            trk = KalmanBoxTracker(dets[i, :5], dets[i, 5], delta_t=self.delta_t)
            self.trackers.append(trk)
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            if trk.last_observation.sum() < 0:
                d = trk.get_state()[0]
            else:
                """
                    It is optional to use either the recent observation or the Kalman filter
                    prediction; we didn't notice a significant difference here
                """
                d = trk.last_observation[:4]
            if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                # +1 as the MOT benchmark requires positive IDs
                ret.append(np.concatenate((d, [trk.id+1], [trk.cls], [trk.conf])).reshape(1, -1))
            i -= 1
            # remove dead tracklets
            if trk.time_since_update > self.max_age:
                self.trackers.pop(i)
        if len(ret) > 0:
            return np.concatenate(ret)
        return np.empty((0, 5))
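A per-frame driver sketch for OCSort (illustrative: `detector` and `frames` are stand-ins for your detection model and video loader; update() expects torch tensors with 6 columns since it calls .numpy() on them):

    import torch
    from trackers.ocsort.ocsort import OCSort

    tracker = OCSort(det_thresh=0.3)
    for frame in frames:
        dets = detector(frame)  # torch tensor (N, 6): [x1, y1, x2, y2, conf, cls]
        tracks = tracker.update(dets, frame)  # rows: [x1, y1, x2, y2, id, cls, conf]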
313	feeder/trackers/reid_export.py	Normal file
@@ -0,0 +1,313 @@
import argparse

import os
# limit the number of cpus used by high performance libraries
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"

import sys
import numpy as np
from pathlib import Path
import torch
import time
import platform
import pandas as pd
import subprocess
import torch.backends.cudnn as cudnn
from torch.utils.mobile_optimizer import optimize_for_mobile

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0].parents[0]  # yolov5 strongsort root directory
WEIGHTS = ROOT / 'weights'


if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
if str(ROOT / 'yolov5') not in sys.path:
    sys.path.append(str(ROOT / 'yolov5'))  # add yolov5 ROOT to PATH

ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import logging
from ultralytics.yolo.utils.torch_utils import select_device
from ultralytics.yolo.utils import LOGGER, colorstr, ops
from ultralytics.yolo.utils.checks import check_requirements, check_version
from trackers.strongsort.deep.models import build_model
from trackers.strongsort.deep.reid_model_factory import get_model_name, load_pretrained_weights


def file_size(path):
    # Return file/dir size (MB)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / 1E6
    elif path.is_dir():
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
    else:
        return 0.0


def export_formats():
    # YOLOv5 export formats
    x = [
        ['PyTorch', '-', '.pt', True, True],
        ['TorchScript', 'torchscript', '.torchscript', True, True],
        ['ONNX', 'onnx', '.onnx', True, True],
        ['OpenVINO', 'openvino', '_openvino_model', True, False],
        ['TensorRT', 'engine', '.engine', False, True],
        ['TensorFlow Lite', 'tflite', '.tflite', True, False],
    ]
    return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
 | 
			
		||||
 | 
			
		||||
def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
 | 
			
		||||
    # YOLOv5 TorchScript model export
 | 
			
		||||
    try:
 | 
			
		||||
        LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
 | 
			
		||||
        f = file.with_suffix('.torchscript')
 | 
			
		||||
 | 
			
		||||
        ts = torch.jit.trace(model, im, strict=False)
 | 
			
		||||
        if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
 | 
			
		||||
            optimize_for_mobile(ts)._save_for_lite_interpreter(str(f))
 | 
			
		||||
        else:
 | 
			
		||||
            ts.save(str(f))
 | 
			
		||||
 | 
			
		||||
        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
 | 
			
		||||
        return f
 | 
			
		||||
    except Exception as e:
 | 
			
		||||
        LOGGER.info(f'{prefix} export failure: {e}')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
 | 
			
		||||
    # ONNX export
 | 
			
		||||
    try:
 | 
			
		||||
        check_requirements(('onnx',))
 | 
			
		||||
        import onnx
 | 
			
		||||
 | 
			
		||||
        f = file.with_suffix('.onnx')
 | 
			
		||||
        LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
 | 
			
		||||
        
 | 
			
		||||
        if dynamic:
 | 
			
		||||
            dynamic = {'images': {0: 'batch'}}  # shape(1,3,640,640)
 | 
			
		||||
            dynamic['output'] = {0: 'batch'}  # shape(1,25200,85)
 | 
			
		||||
 | 
			
		||||
        torch.onnx.export(
 | 
			
		||||
            model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
 | 
			
		||||
            im.cpu() if dynamic else im,
 | 
			
		||||
            f,
 | 
			
		||||
            verbose=False,
 | 
			
		||||
            opset_version=opset,
 | 
			
		||||
            do_constant_folding=True,
 | 
			
		||||
            input_names=['images'],
 | 
			
		||||
            output_names=['output'],
 | 
			
		||||
            dynamic_axes=dynamic or None
 | 
			
		||||
        )
 | 
			
		||||
        # Checks
 | 
			
		||||
        model_onnx = onnx.load(f)  # load onnx model
 | 
			
		||||
        onnx.checker.check_model(model_onnx)  # check onnx model
 | 
			
		||||
        onnx.save(model_onnx, f)
 | 
			
		||||
 | 
			
		||||
        # Simplify
 | 
			
		||||
        if simplify:
 | 
			
		||||
            try:
 | 
			
		||||
                cuda = torch.cuda.is_available()
 | 
			
		||||
                check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
 | 
			
		||||
                import onnxsim
 | 
			
		||||
 | 
			
		||||
                LOGGER.info(f'simplifying with onnx-simplifier {onnxsim.__version__}...')
 | 
			
		||||
                model_onnx, check = onnxsim.simplify(model_onnx)
 | 
			
		||||
                assert check, 'assert check failed'
 | 
			
		||||
                onnx.save(model_onnx, f)
 | 
			
		||||
            except Exception as e:
 | 
			
		||||
                LOGGER.info(f'simplifier failure: {e}')
 | 
			
		||||
        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
 | 
			
		||||
        return f
 | 
			
		||||
    except Exception as e:
 | 
			
		||||
        LOGGER.info(f'export failure: {e}')
 | 
			
		||||
    
 | 
			
		||||
        
 | 
			
		||||
        
 | 
			
		||||
def export_openvino(file, half, prefix=colorstr('OpenVINO:')):
 | 
			
		||||
    # YOLOv5 OpenVINO export
 | 
			
		||||
    check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
 | 
			
		||||
    import openvino.inference_engine as ie
 | 
			
		||||
    try:
 | 
			
		||||
        LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
 | 
			
		||||
        f = str(file).replace('.pt', f'_openvino_model{os.sep}')
 | 
			
		||||
 | 
			
		||||
        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
 | 
			
		||||
        subprocess.check_output(cmd.split())  # export
 | 
			
		||||
    except Exception as e:
 | 
			
		||||
        LOGGER.info(f'export failure: {e}')
 | 
			
		||||
    LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
 | 
			
		||||
    return f
 | 
			
		||||
        
 | 
			
		||||
 | 
			
		||||
def export_tflite(file, half, prefix=colorstr('TFLite:')):
 | 
			
		||||
    # YOLOv5 OpenVINO export
 | 
			
		||||
    try:
 | 
			
		||||
        check_requirements(('openvino2tensorflow', 'tensorflow', 'tensorflow_datasets'))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
 | 
			
		||||
        import openvino.inference_engine as ie
 | 
			
		||||
        LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
 | 
			
		||||
        output = Path(str(file).replace(f'_openvino_model{os.sep}', f'_tflite_model{os.sep}'))
 | 
			
		||||
        modelxml = list(Path(file).glob('*.xml'))[0]
 | 
			
		||||
        cmd = f"openvino2tensorflow \
 | 
			
		||||
            --model_path {modelxml} \
 | 
			
		||||
            --model_output_path {output} \
 | 
			
		||||
            --output_pb \
 | 
			
		||||
            --output_saved_model \
 | 
			
		||||
            --output_no_quant_float32_tflite \
 | 
			
		||||
            --output_dynamic_range_quant_tflite"
 | 
			
		||||
        subprocess.check_output(cmd.split())  # export
 | 
			
		||||
 | 
			
		||||
        LOGGER.info(f'{prefix} export success, results saved in {output} ({file_size(f):.1f} MB)')
 | 
			
		||||
        return f
 | 
			
		||||
    except Exception as e:
 | 
			
		||||
        LOGGER.info(f'\n{prefix} export failure: {e}')
 | 
			
		||||
        
 | 
			
		||||
        
 | 
			
		||||
def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
 | 
			
		||||
    # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
 | 
			
		||||
    try:
 | 
			
		||||
        assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
 | 
			
		||||
        try:
 | 
			
		||||
            import tensorrt as trt
 | 
			
		||||
        except Exception:
 | 
			
		||||
            if platform.system() == 'Linux':
 | 
			
		||||
                check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',))
 | 
			
		||||
            import tensorrt as trt
 | 
			
		||||
 | 
			
		||||
        if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
 | 
			
		||||
            grid = model.model[-1].anchor_grid
 | 
			
		||||
            model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
 | 
			
		||||
            export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
 | 
			
		||||
            model.model[-1].anchor_grid = grid
 | 
			
		||||
        else:  # TensorRT >= 8
 | 
			
		||||
            check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
 | 
			
		||||
            export_onnx(model, im, file, 12, dynamic, simplify)  # opset 13
 | 
			
		||||
        onnx = file.with_suffix('.onnx')
 | 
			
		||||
 | 
			
		||||
        LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
 | 
			
		||||
        assert onnx.exists(), f'failed to export ONNX file: {onnx}'
 | 
			
		||||
        f = file.with_suffix('.engine')  # TensorRT engine file
 | 
			
		||||
        logger = trt.Logger(trt.Logger.INFO)
 | 
			
		||||
        if verbose:
 | 
			
		||||
            logger.min_severity = trt.Logger.Severity.VERBOSE
 | 
			
		||||
 | 
			
		||||
        builder = trt.Builder(logger)
 | 
			
		||||
        config = builder.create_builder_config()
 | 
			
		||||
        config.max_workspace_size = workspace * 1 << 30
 | 
			
		||||
        # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice
 | 
			
		||||
 | 
			
		||||
        flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
 | 
			
		||||
        network = builder.create_network(flag)
 | 
			
		||||
        parser = trt.OnnxParser(network, logger)
 | 
			
		||||
        if not parser.parse_from_file(str(onnx)):
 | 
			
		||||
            raise RuntimeError(f'failed to load ONNX file: {onnx}')
 | 
			
		||||
 | 
			
		||||
        inputs = [network.get_input(i) for i in range(network.num_inputs)]
 | 
			
		||||
        outputs = [network.get_output(i) for i in range(network.num_outputs)]
 | 
			
		||||
        LOGGER.info(f'{prefix} Network Description:')
 | 
			
		||||
        for inp in inputs:
 | 
			
		||||
            LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
 | 
			
		||||
        for out in outputs:
 | 
			
		||||
            LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
 | 
			
		||||
 | 
			
		||||
        if dynamic:
 | 
			
		||||
            if im.shape[0] <= 1:
 | 
			
		||||
                LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument")
 | 
			
		||||
            profile = builder.create_optimization_profile()
 | 
			
		||||
            for inp in inputs:
 | 
			
		||||
                profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
 | 
			
		||||
            config.add_optimization_profile(profile)
 | 
			
		||||
 | 
			
		||||
        LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}')
 | 
			
		||||
        if builder.platform_has_fast_fp16 and half:
 | 
			
		||||
            config.set_flag(trt.BuilderFlag.FP16)
 | 
			
		||||
        with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
 | 
			
		||||
            t.write(engine.serialize())
 | 
			
		||||
        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
 | 
			
		||||
        return f
 | 
			
		||||
    except Exception as e:
 | 
			
		||||
        LOGGER.info(f'\n{prefix} export failure: {e}')
 | 
			
		||||
        
 | 
			
		||||
        
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
 | 
			
		||||
    parser = argparse.ArgumentParser(description="ReID export")
 | 
			
		||||
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
 | 
			
		||||
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[256, 128], help='image (h, w)')
 | 
			
		||||
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
 | 
			
		||||
    parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
 | 
			
		||||
    parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
 | 
			
		||||
    parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
 | 
			
		||||
    parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version')
 | 
			
		||||
    parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
 | 
			
		||||
    parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
 | 
			
		||||
    parser.add_argument('--weights', nargs='+', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt', help='model.pt path(s)')
 | 
			
		||||
    parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
 | 
			
		||||
    parser.add_argument('--include',
 | 
			
		||||
                        nargs='+',
 | 
			
		||||
                        default=['torchscript'],
 | 
			
		||||
                        help='torchscript, onnx, openvino, engine')
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    t = time.time()
 | 
			
		||||
 | 
			
		||||
    include = [x.lower() for x in args.include]  # to lowercase
 | 
			
		||||
    fmts = tuple(export_formats()['Argument'][1:])  # --include arguments
 | 
			
		||||
    flags = [x in include for x in fmts]
 | 
			
		||||
    assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
 | 
			
		||||
    jit, onnx, openvino, engine, tflite = flags  # export booleans
 | 
			
		||||
 | 
			
		||||
    args.device = select_device(args.device)
 | 
			
		||||
    if args.half:
 | 
			
		||||
        assert args.device.type != 'cpu', '--half only compatible with GPU export, i.e. use --device 0'
 | 
			
		||||
        assert not args.dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
 | 
			
		||||
    
 | 
			
		||||
    if type(args.weights) is list:
 | 
			
		||||
        args.weights = Path(args.weights[0])
 | 
			
		||||
 | 
			
		||||
    model = build_model(
 | 
			
		||||
        get_model_name(args.weights),
 | 
			
		||||
        num_classes=1,
 | 
			
		||||
        pretrained=not (args.weights and args.weights.is_file() and args.weights.suffix == '.pt'),
 | 
			
		||||
        use_gpu=args.device
 | 
			
		||||
    ).to(args.device)
 | 
			
		||||
    load_pretrained_weights(model, args.weights)
 | 
			
		||||
    model.eval()
 | 
			
		||||
 | 
			
		||||
    if args.optimize:
 | 
			
		||||
        assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'
 | 
			
		||||
    
 | 
			
		||||
    im = torch.zeros(args.batch_size, 3, args.imgsz[0], args.imgsz[1]).to(args.device)  # image size(1,3,640,480) BCHW iDetection
 | 
			
		||||
    for _ in range(2):
 | 
			
		||||
        y = model(im)  # dry runs
 | 
			
		||||
    if args.half:
 | 
			
		||||
        im, model = im.half(), model.half()  # to FP16
 | 
			
		||||
    shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
 | 
			
		||||
    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {args.weights} with output shape {shape} ({file_size(args.weights):.1f} MB)")
 | 
			
		||||
    
 | 
			
		||||
    # Exports
 | 
			
		||||
    f = [''] * len(fmts)  # exported filenames
 | 
			
		||||
    if jit:
 | 
			
		||||
        f[0] = export_torchscript(model, im, args.weights, args.optimize)  # opset 12
 | 
			
		||||
    if engine:  # TensorRT required before ONNX
 | 
			
		||||
        f[1] = export_engine(model, im, args.weights, args.half, args.dynamic, args.simplify, args.workspace, args.verbose)
 | 
			
		||||
    if onnx:  # OpenVINO requires ONNX
 | 
			
		||||
        f[2] = export_onnx(model, im, args.weights, args.opset, args.dynamic, args.simplify)  # opset 12
 | 
			
		||||
    if openvino:
 | 
			
		||||
        f[3] = export_openvino(args.weights, args.half)
 | 
			
		||||
    if tflite:
 | 
			
		||||
        export_tflite(f, False)
 | 
			
		||||
 | 
			
		||||
    # Finish
 | 
			
		||||
    f = [str(x) for x in f if x]  # filter out '' and None
 | 
			
		||||
    if any(f):
 | 
			
		||||
        LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
 | 
			
		||||
                    f"\nResults saved to {colorstr('bold', args.weights.parent.resolve())}"
 | 
			
		||||
                    f"\nVisualize:       https://netron.app")
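
A quick way to sanity-check an export from this script (a hedged sketch: assumes you ran something like `python trackers/reid_export.py --weights weights/osnet_x0_25_msmt17.pt --include onnx` with the default 256x128 input size, and that onnxruntime is installed):

import numpy as np
import onnxruntime as ort

# Run a dummy person crop through the exported ReID model; the input name
# 'images' matches the export_onnx() call above.
sess = ort.InferenceSession('weights/osnet_x0_25_msmt17.onnx',
                            providers=['CPUExecutionProvider'])
(features,) = sess.run(None, {'images': np.zeros((1, 3, 256, 128), np.float32)})
print(features.shape)  # (1, feature_dim)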
0 feeder/trackers/strongsort/__init__.py Normal file

11 feeder/trackers/strongsort/configs/strongsort.yaml Normal file
@@ -0,0 +1,11 @@
strongsort:
  ecc: true
  ema_alpha: 0.8962157769329083
  max_age: 40
  max_dist: 0.1594374041012136
  max_iou_dist: 0.5431835667667874
  max_unmatched_preds: 0
  mc_lambda: 0.995
  n_init: 3
  nn_budget: 100
  conf_thres: 0.5122620708221085
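
The long decimals suggest these values came out of a hyperparameter search. A minimal sketch of reading them back (assumes PyYAML and this file path; the tracker itself may load them differently):

import yaml

with open('feeder/trackers/strongsort/configs/strongsort.yaml') as f:
    cfg = yaml.safe_load(f)['strongsort']
print(cfg['max_age'], cfg['n_init'], cfg['nn_budget'])  # 40 3 100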
0 feeder/trackers/strongsort/deep/checkpoint/.gitkeep Normal file

BIN feeder/trackers/strongsort/deep/checkpoint/osnet_x1_0_msmt17.pth Normal file (binary file not shown)

122 feeder/trackers/strongsort/deep/models/__init__.py Normal file
@@ -0,0 +1,122 @@
from __future__ import absolute_import
import torch

from .pcb import *
from .mlfn import *
from .hacnn import *
from .osnet import *
from .senet import *
from .mudeep import *
from .nasnet import *
from .resnet import *
from .densenet import *
from .xception import *
from .osnet_ain import *
from .resnetmid import *
from .shufflenet import *
from .squeezenet import *
from .inceptionv4 import *
from .mobilenetv2 import *
from .resnet_ibn_a import *
from .resnet_ibn_b import *
from .shufflenetv2 import *
from .inceptionresnetv2 import *

__model_factory = {
    # image classification models
    'resnet18': resnet18,
    'resnet34': resnet34,
    'resnet50': resnet50,
    'resnet101': resnet101,
    'resnet152': resnet152,
    'resnext50_32x4d': resnext50_32x4d,
    'resnext101_32x8d': resnext101_32x8d,
    'resnet50_fc512': resnet50_fc512,
    'se_resnet50': se_resnet50,
    'se_resnet50_fc512': se_resnet50_fc512,
    'se_resnet101': se_resnet101,
    'se_resnext50_32x4d': se_resnext50_32x4d,
    'se_resnext101_32x4d': se_resnext101_32x4d,
    'densenet121': densenet121,
    'densenet169': densenet169,
    'densenet201': densenet201,
    'densenet161': densenet161,
    'densenet121_fc512': densenet121_fc512,
    'inceptionresnetv2': inceptionresnetv2,
    'inceptionv4': inceptionv4,
    'xception': xception,
    'resnet50_ibn_a': resnet50_ibn_a,
    'resnet50_ibn_b': resnet50_ibn_b,
    # lightweight models
    'nasnetamobile': nasnetamobile,
    'mobilenetv2_x1_0': mobilenetv2_x1_0,
    'mobilenetv2_x1_4': mobilenetv2_x1_4,
    'shufflenet': shufflenet,
    'squeezenet1_0': squeezenet1_0,
    'squeezenet1_0_fc512': squeezenet1_0_fc512,
    'squeezenet1_1': squeezenet1_1,
    'shufflenet_v2_x0_5': shufflenet_v2_x0_5,
    'shufflenet_v2_x1_0': shufflenet_v2_x1_0,
    'shufflenet_v2_x1_5': shufflenet_v2_x1_5,
    'shufflenet_v2_x2_0': shufflenet_v2_x2_0,
    # reid-specific models
    'mudeep': MuDeep,
    'resnet50mid': resnet50mid,
    'hacnn': HACNN,
    'pcb_p6': pcb_p6,
    'pcb_p4': pcb_p4,
    'mlfn': mlfn,
    'osnet_x1_0': osnet_x1_0,
    'osnet_x0_75': osnet_x0_75,
    'osnet_x0_5': osnet_x0_5,
    'osnet_x0_25': osnet_x0_25,
    'osnet_ibn_x1_0': osnet_ibn_x1_0,
    'osnet_ain_x1_0': osnet_ain_x1_0,
    'osnet_ain_x0_75': osnet_ain_x0_75,
    'osnet_ain_x0_5': osnet_ain_x0_5,
    'osnet_ain_x0_25': osnet_ain_x0_25
}


def show_avai_models():
    """Displays available models.

    Examples::
        >>> from torchreid import models
        >>> models.show_avai_models()
    """
    print(list(__model_factory.keys()))


def build_model(
    name, num_classes, loss='softmax', pretrained=True, use_gpu=True
):
    """A function wrapper for building a model.

    Args:
        name (str): model name.
        num_classes (int): number of training identities.
        loss (str, optional): loss function to optimize the model. Currently
            supports "softmax" and "triplet". Default is "softmax".
        pretrained (bool, optional): whether to load ImageNet-pretrained weights.
            Default is True.
        use_gpu (bool, optional): whether to use gpu. Default is True.

    Returns:
        nn.Module

    Examples::
        >>> from torchreid import models
        >>> model = models.build_model('resnet50', 751, loss='softmax')
    """
    avai_models = list(__model_factory.keys())
    if name not in avai_models:
        raise KeyError(
            'Unknown model: {}. Must be one of {}'.format(name, avai_models)
        )
    return __model_factory[name](
        num_classes=num_classes,
        loss=loss,
        pretrained=pretrained,
        use_gpu=use_gpu
    )
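
A minimal sketch of using this factory as a feature extractor (illustrative; assumes the package import path used elsewhere in this commit, and that in eval mode forward() returns embeddings rather than logits, which is the convention in these model files):

import torch
from trackers.strongsort.deep.models import build_model

model = build_model('osnet_x0_25', num_classes=1, pretrained=False)
model.eval()  # eval mode: forward() returns feature embeddings
with torch.no_grad():
    feats = model(torch.zeros(1, 3, 256, 128))
print(feats.shape)  # e.g. (1, 512)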
380 feeder/trackers/strongsort/deep/models/densenet.py Normal file
@@ -0,0 +1,380 @@
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import re
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils import model_zoo

__all__ = [
    'densenet121', 'densenet169', 'densenet201', 'densenet161',
    'densenet121_fc512'
]

model_urls = {
    'densenet121':
    'https://download.pytorch.org/models/densenet121-a639ec97.pth',
    'densenet169':
    'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
    'densenet201':
    'https://download.pytorch.org/models/densenet201-c1103571.pth',
    'densenet161':
    'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}


class _DenseLayer(nn.Sequential):

    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
        self.add_module('relu1', nn.ReLU(inplace=True)),
        self.add_module(
            'conv1',
            nn.Conv2d(
                num_input_features,
                bn_size * growth_rate,
                kernel_size=1,
                stride=1,
                bias=False
            )
        ),
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
        self.add_module('relu2', nn.ReLU(inplace=True)),
        self.add_module(
            'conv2',
            nn.Conv2d(
                bn_size * growth_rate,
                growth_rate,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False
            )
        ),
        self.drop_rate = drop_rate

    def forward(self, x):
        new_features = super(_DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            new_features = F.dropout(
                new_features, p=self.drop_rate, training=self.training
            )
        return torch.cat([x, new_features], 1)


class _DenseBlock(nn.Sequential):

    def __init__(
        self, num_layers, num_input_features, bn_size, growth_rate, drop_rate
    ):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(
                num_input_features + i*growth_rate, growth_rate, bn_size,
                drop_rate
            )
            self.add_module('denselayer%d' % (i+1), layer)


class _Transition(nn.Sequential):

    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module(
            'conv',
            nn.Conv2d(
                num_input_features,
                num_output_features,
                kernel_size=1,
                stride=1,
                bias=False
            )
        )
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))


class DenseNet(nn.Module):
    """Densely connected network.

    Reference:
        Huang et al. Densely Connected Convolutional Networks. CVPR 2017.

    Public keys:
        - ``densenet121``: DenseNet121.
        - ``densenet169``: DenseNet169.
        - ``densenet201``: DenseNet201.
        - ``densenet161``: DenseNet161.
        - ``densenet121_fc512``: DenseNet121 + FC.
    """

    def __init__(
        self,
        num_classes,
        loss,
        growth_rate=32,
        block_config=(6, 12, 24, 16),
        num_init_features=64,
        bn_size=4,
        drop_rate=0,
        fc_dims=None,
        dropout_p=None,
        **kwargs
    ):

        super(DenseNet, self).__init__()
        self.loss = loss

        # First convolution
        self.features = nn.Sequential(
            OrderedDict(
                [
                    (
                        'conv0',
                        nn.Conv2d(
                            3,
                            num_init_features,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False
                        )
                    ),
                    ('norm0', nn.BatchNorm2d(num_init_features)),
                    ('relu0', nn.ReLU(inplace=True)),
                    (
                        'pool0',
                        nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
                    ),
                ]
            )
        )

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate
            )
            self.features.add_module('denseblock%d' % (i+1), block)
            num_features = num_features + num_layers*growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(
                    num_input_features=num_features,
                    num_output_features=num_features // 2
                )
                self.features.add_module('transition%d' % (i+1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.feature_dim = num_features
        self.fc = self._construct_fc_layer(fc_dims, num_features, dropout_p)

        # Linear layer
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self._init_params()

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer.

        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None

        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        f = self.features(x)
        f = F.relu(f, inplace=True)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if self.fc is not None:
            v = self.fc(v)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)

    # '.'s are no longer allowed in module names, but previous _DenseLayer
    # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
    # They are also in the checkpoints in model_urls. This pattern is used
    # to find such keys.
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
    )
    for key in list(pretrain_dict.keys()):
        res = pattern.match(key)
        if res:
            new_key = res.group(1) + res.group(2)
            pretrain_dict[new_key] = pretrain_dict[key]
            del pretrain_dict[key]

    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)
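
# Illustrative example of the key renaming above (comment only, not in the
# original file): a torchvision checkpoint key such as
#   'features.denseblock1.denselayer1.norm.1.weight'
# is rewritten to
#   'features.denseblock1.denselayer1.norm1.weight'
# so it matches the module names registered by _DenseLayer here.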
"""
Dense network configurations:
--
densenet121: num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16)
densenet169: num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32)
densenet201: num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32)
densenet161: num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24)
"""


def densenet121(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = DenseNet(
        num_classes=num_classes,
        loss=loss,
        num_init_features=64,
        growth_rate=32,
        block_config=(6, 12, 24, 16),
        fc_dims=None,
        dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['densenet121'])
    return model


def densenet169(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = DenseNet(
        num_classes=num_classes,
        loss=loss,
        num_init_features=64,
        growth_rate=32,
        block_config=(6, 12, 32, 32),
        fc_dims=None,
        dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['densenet169'])
    return model


def densenet201(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = DenseNet(
        num_classes=num_classes,
        loss=loss,
        num_init_features=64,
        growth_rate=32,
        block_config=(6, 12, 48, 32),
        fc_dims=None,
        dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['densenet201'])
    return model


def densenet161(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = DenseNet(
        num_classes=num_classes,
        loss=loss,
        num_init_features=96,
        growth_rate=48,
        block_config=(6, 12, 36, 24),
        fc_dims=None,
        dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['densenet161'])
    return model


def densenet121_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = DenseNet(
        num_classes=num_classes,
        loss=loss,
        num_init_features=64,
        growth_rate=32,
        block_config=(6, 12, 24, 16),
        fc_dims=[512],
        dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['densenet121'])
    return model
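
The builders above only vary num_init_features, growth_rate and block_config; the final feature dimension falls out of the dense-block arithmetic in DenseNet.__init__. A quick standalone check (illustrative):

num_features = 64                          # num_init_features (densenet121)
for i, n in enumerate((6, 12, 24, 16)):    # block_config
    num_features += n * 32                 # each layer adds growth_rate channels
    if i != 3:                             # every block but the last is followed
        num_features //= 2                 # by a transition that halves channels
print(num_features)  # 1024 == DenseNet.feature_dim when fc_dims is None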
414 feeder/trackers/strongsort/deep/models/hacnn.py Normal file
@@ -0,0 +1,414 @@
from __future__ import division, absolute_import
import torch
from torch import nn
from torch.nn import functional as F

__all__ = ['HACNN']


class ConvBlock(nn.Module):
    """Basic convolutional block.

    convolution + batch normalization + relu.

    Args:
        in_c (int): number of input channels.
        out_c (int): number of output channels.
        k (int or tuple): kernel size.
        s (int or tuple): stride.
        p (int or tuple): padding.
    """

    def __init__(self, in_c, out_c, k, s=1, p=0):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p)
        self.bn = nn.BatchNorm2d(out_c)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)))


class InceptionA(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(InceptionA, self).__init__()
        mid_channels = out_channels // 4

        self.stream1 = nn.Sequential(
            ConvBlock(in_channels, mid_channels, 1),
            ConvBlock(mid_channels, mid_channels, 3, p=1),
        )
        self.stream2 = nn.Sequential(
            ConvBlock(in_channels, mid_channels, 1),
            ConvBlock(mid_channels, mid_channels, 3, p=1),
        )
        self.stream3 = nn.Sequential(
            ConvBlock(in_channels, mid_channels, 1),
            ConvBlock(mid_channels, mid_channels, 3, p=1),
        )
        self.stream4 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1),
            ConvBlock(in_channels, mid_channels, 1),
        )

    def forward(self, x):
        s1 = self.stream1(x)
        s2 = self.stream2(x)
        s3 = self.stream3(x)
        s4 = self.stream4(x)
        y = torch.cat([s1, s2, s3, s4], dim=1)
        return y


class InceptionB(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(InceptionB, self).__init__()
        mid_channels = out_channels // 4

        self.stream1 = nn.Sequential(
            ConvBlock(in_channels, mid_channels, 1),
            ConvBlock(mid_channels, mid_channels, 3, s=2, p=1),
        )
        self.stream2 = nn.Sequential(
            ConvBlock(in_channels, mid_channels, 1),
            ConvBlock(mid_channels, mid_channels, 3, p=1),
            ConvBlock(mid_channels, mid_channels, 3, s=2, p=1),
        )
        self.stream3 = nn.Sequential(
            nn.MaxPool2d(3, stride=2, padding=1),
            ConvBlock(in_channels, mid_channels * 2, 1),
        )

    def forward(self, x):
        s1 = self.stream1(x)
        s2 = self.stream2(x)
        s3 = self.stream3(x)
        y = torch.cat([s1, s2, s3], dim=1)
        return y


class SpatialAttn(nn.Module):
    """Spatial Attention (Sec. 3.1.I.1)"""

    def __init__(self):
        super(SpatialAttn, self).__init__()
        self.conv1 = ConvBlock(1, 1, 3, s=2, p=1)
        self.conv2 = ConvBlock(1, 1, 1)

    def forward(self, x):
        # global cross-channel averaging
        x = x.mean(1, keepdim=True)
        # 3-by-3 conv
        x = self.conv1(x)
        # bilinear resizing
        x = F.upsample(
            x, (x.size(2) * 2, x.size(3) * 2),
            mode='bilinear',
            align_corners=True
        )
        # scaling conv
        x = self.conv2(x)
        return x


class ChannelAttn(nn.Module):
    """Channel Attention (Sec. 3.1.I.2)"""

    def __init__(self, in_channels, reduction_rate=16):
        super(ChannelAttn, self).__init__()
        assert in_channels % reduction_rate == 0
        self.conv1 = ConvBlock(in_channels, in_channels // reduction_rate, 1)
        self.conv2 = ConvBlock(in_channels // reduction_rate, in_channels, 1)

    def forward(self, x):
        # squeeze operation (global average pooling)
        x = F.avg_pool2d(x, x.size()[2:])
        # excitation operation (2 conv layers)
        x = self.conv1(x)
        x = self.conv2(x)
        return x


class SoftAttn(nn.Module):
    """Soft Attention (Sec. 3.1.I)

    Aim: Spatial Attention + Channel Attention

    Output: attention maps with shape identical to input.
    """

    def __init__(self, in_channels):
        super(SoftAttn, self).__init__()
        self.spatial_attn = SpatialAttn()
        self.channel_attn = ChannelAttn(in_channels)
        self.conv = ConvBlock(in_channels, in_channels, 1)

    def forward(self, x):
        y_spatial = self.spatial_attn(x)
        y_channel = self.channel_attn(x)
        y = y_spatial * y_channel
        y = torch.sigmoid(self.conv(y))
        return y


class HardAttn(nn.Module):
    """Hard Attention (Sec. 3.1.II)"""

    def __init__(self, in_channels):
        super(HardAttn, self).__init__()
        self.fc = nn.Linear(in_channels, 4 * 2)
        self.init_params()

    def init_params(self):
        self.fc.weight.data.zero_()
        self.fc.bias.data.copy_(
            torch.tensor(
                [0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float
            )
        )

    def forward(self, x):
        # squeeze operation (global average pooling)
        x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1))
        # predict transformation parameters
        theta = torch.tanh(self.fc(x))
        theta = theta.view(-1, 4, 2)
        return theta


class HarmAttn(nn.Module):
    """Harmonious Attention (Sec. 3.1)"""

    def __init__(self, in_channels):
        super(HarmAttn, self).__init__()
        self.soft_attn = SoftAttn(in_channels)
        self.hard_attn = HardAttn(in_channels)

    def forward(self, x):
        y_soft_attn = self.soft_attn(x)
        theta = self.hard_attn(x)
        return y_soft_attn, theta


class HACNN(nn.Module):
    """Harmonious Attention Convolutional Neural Network.

    Reference:
        Li et al. Harmonious Attention Network for Person Re-identification. CVPR 2018.

    Public keys:
        - ``hacnn``: HACNN.
    """

    # Args:
    #    num_classes (int): number of classes to predict
    #    nchannels (list): number of channels AFTER concatenation
    #    feat_dim (int): feature dimension for a single stream
    #    learn_region (bool): whether to learn region features (i.e. local branch)

    def __init__(
        self,
        num_classes,
        loss='softmax',
        nchannels=[128, 256, 384],
        feat_dim=512,
        learn_region=True,
        use_gpu=True,
        **kwargs
    ):
        super(HACNN, self).__init__()
        self.loss = loss
        self.learn_region = learn_region
        self.use_gpu = use_gpu

        self.conv = ConvBlock(3, 32, 3, s=2, p=1)

        # Construct Inception + HarmAttn blocks
        # ============== Block 1 ==============
        self.inception1 = nn.Sequential(
            InceptionA(32, nchannels[0]),
            InceptionB(nchannels[0], nchannels[0]),
        )
        self.ha1 = HarmAttn(nchannels[0])

        # ============== Block 2 ==============
        self.inception2 = nn.Sequential(
            InceptionA(nchannels[0], nchannels[1]),
            InceptionB(nchannels[1], nchannels[1]),
        )
        self.ha2 = HarmAttn(nchannels[1])

        # ============== Block 3 ==============
        self.inception3 = nn.Sequential(
            InceptionA(nchannels[1], nchannels[2]),
            InceptionB(nchannels[2], nchannels[2]),
        )
        self.ha3 = HarmAttn(nchannels[2])

        self.fc_global = nn.Sequential(
            nn.Linear(nchannels[2], feat_dim),
            nn.BatchNorm1d(feat_dim),
            nn.ReLU(),
        )
        self.classifier_global = nn.Linear(feat_dim, num_classes)

        if self.learn_region:
            self.init_scale_factors()
            self.local_conv1 = InceptionB(32, nchannels[0])
            self.local_conv2 = InceptionB(nchannels[0], nchannels[1])
            self.local_conv3 = InceptionB(nchannels[1], nchannels[2])
            self.fc_local = nn.Sequential(
                nn.Linear(nchannels[2] * 4, feat_dim),
                nn.BatchNorm1d(feat_dim),
                nn.ReLU(),
            )
            self.classifier_local = nn.Linear(feat_dim, num_classes)
            self.feat_dim = feat_dim * 2
        else:
            self.feat_dim = feat_dim

    def init_scale_factors(self):
        # initialize scale factors (s_w, s_h) for four regions
        self.scale_factors = []
        self.scale_factors.append(
            torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float)
        )
        self.scale_factors.append(
            torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float)
        )
        self.scale_factors.append(
            torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float)
        )
        self.scale_factors.append(
            torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float)
        )

    def stn(self, x, theta):
        """Performs spatial transform

        x: (batch, channel, height, width)
        theta: (batch, 2, 3)
        """
        grid = F.affine_grid(theta, x.size())
        x = F.grid_sample(x, grid)
        return x

    def transform_theta(self, theta_i, region_idx):
        """Transforms theta to include (s_w, s_h), resulting in (batch, 2, 3)"""
        scale_factors = self.scale_factors[region_idx]
        theta = torch.zeros(theta_i.size(0), 2, 3)
        theta[:, :, :2] = scale_factors
        theta[:, :, -1] = theta_i
        if self.use_gpu:
            theta = theta.cuda()
        return theta
 | 
			
		||||
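
    # Each transformed theta is a full 2x3 affine matrix of the form
    #     [[s_w, 0,   t_x],
    #      [0,   s_h, t_y]]
    # where the translation (t_x, t_y) is the per-region output of the
    # harmonious attention module and (s_w, s_h) come from the fixed
    # scale factors initialized above.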

    def forward(self, x):
        assert x.size(2) == 160 and x.size(3) == 64, \
            'Input size does not match, expected (160, 64) but got ({}, {})'.format(x.size(2), x.size(3))
        x = self.conv(x)

        # ============== Block 1 ==============
        # global branch
        x1 = self.inception1(x)
        x1_attn, x1_theta = self.ha1(x1)
        x1_out = x1 * x1_attn
        # local branch
        if self.learn_region:
            x1_local_list = []
            for region_idx in range(4):
                x1_theta_i = x1_theta[:, region_idx, :]
                x1_theta_i = self.transform_theta(x1_theta_i, region_idx)
                x1_trans_i = self.stn(x, x1_theta_i)
                x1_trans_i = F.upsample(
                    x1_trans_i, (24, 28), mode='bilinear', align_corners=True
                )
                x1_local_i = self.local_conv1(x1_trans_i)
                x1_local_list.append(x1_local_i)

        # ============== Block 2 ==============
        # global branch
        x2 = self.inception2(x1_out)
        x2_attn, x2_theta = self.ha2(x2)
        x2_out = x2 * x2_attn
        # local branch
        if self.learn_region:
            x2_local_list = []
            for region_idx in range(4):
                x2_theta_i = x2_theta[:, region_idx, :]
                x2_theta_i = self.transform_theta(x2_theta_i, region_idx)
                x2_trans_i = self.stn(x1_out, x2_theta_i)
                x2_trans_i = F.upsample(
                    x2_trans_i, (12, 14), mode='bilinear', align_corners=True
                )
                x2_local_i = x2_trans_i + x1_local_list[region_idx]
                x2_local_i = self.local_conv2(x2_local_i)
                x2_local_list.append(x2_local_i)

        # ============== Block 3 ==============
        # global branch
        x3 = self.inception3(x2_out)
        x3_attn, x3_theta = self.ha3(x3)
        x3_out = x3 * x3_attn
        # local branch
        if self.learn_region:
            x3_local_list = []
            for region_idx in range(4):
                x3_theta_i = x3_theta[:, region_idx, :]
                x3_theta_i = self.transform_theta(x3_theta_i, region_idx)
                x3_trans_i = self.stn(x2_out, x3_theta_i)
                x3_trans_i = F.upsample(
                    x3_trans_i, (6, 7), mode='bilinear', align_corners=True
                )
                x3_local_i = x3_trans_i + x2_local_list[region_idx]
                x3_local_i = self.local_conv3(x3_local_i)
                x3_local_list.append(x3_local_i)

        # ============== Feature generation ==============
        # global branch
        x_global = F.avg_pool2d(
            x3_out, x3_out.size()[2:]
        ).view(x3_out.size(0), x3_out.size(1))
        x_global = self.fc_global(x_global)
        # local branch
        if self.learn_region:
            x_local_list = []
            for region_idx in range(4):
                x_local_i = x3_local_list[region_idx]
                x_local_i = F.avg_pool2d(
                    x_local_i, x_local_i.size()[2:]
                ).view(x_local_i.size(0), -1)
                x_local_list.append(x_local_i)
            x_local = torch.cat(x_local_list, 1)
            x_local = self.fc_local(x_local)

        if not self.training:
            # l2 normalization before concatenation
            if self.learn_region:
                x_global = x_global / x_global.norm(p=2, dim=1, keepdim=True)
                x_local = x_local / x_local.norm(p=2, dim=1, keepdim=True)
                return torch.cat([x_global, x_local], 1)
            else:
                return x_global

        prelogits_global = self.classifier_global(x_global)
        if self.learn_region:
            prelogits_local = self.classifier_local(x_local)

        if self.loss == 'softmax':
            if self.learn_region:
                return (prelogits_global, prelogits_local)
            else:
                return prelogits_global
        elif self.loss == 'triplet':
            if self.learn_region:
                return (prelogits_global, prelogits_local), (x_global, x_local)
            else:
                return prelogits_global, x_global
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
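
A minimal usage sketch of the model above as a re-ID feature extractor. This is not part of the commit: the enclosing class name HACNN and the keyword names are assumed from the attributes set in __init__ and the upstream torchreid implementation, and the 751-class head (Market-1501) is only an example.

import torch

model = HACNN(num_classes=751, use_gpu=False)
model.eval()
with torch.no_grad():
    crops = torch.randn(8, 3, 160, 64)  # forward() asserts a 160x64 input
    feats = model(crops)  # eval mode returns l2-normalized embeddings
print(feats.shape)  # (8, 2 * feat_dim) when learn_region is enabled
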
361  feeder/trackers/strongsort/deep/models/inceptionresnetv2.py  Normal file
@@ -0,0 +1,361 @@
"""
 | 
			
		||||
Code imported from https://github.com/Cadene/pretrained-models.pytorch
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import division, absolute_import
 | 
			
		||||
import torch
 | 
			
		||||
import torch.nn as nn
 | 
			
		||||
import torch.utils.model_zoo as model_zoo
 | 
			
		||||
 | 
			
		||||
__all__ = ['inceptionresnetv2']
 | 
			
		||||
 | 
			
		||||
pretrained_settings = {
 | 
			
		||||
    'inceptionresnetv2': {
 | 
			
		||||
        'imagenet': {
 | 
			
		||||
            'url':
 | 
			
		||||
            'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',
 | 
			
		||||
            'input_space': 'RGB',
 | 
			
		||||
            'input_size': [3, 299, 299],
 | 
			
		||||
            'input_range': [0, 1],
 | 
			
		||||
            'mean': [0.5, 0.5, 0.5],
 | 
			
		||||
            'std': [0.5, 0.5, 0.5],
 | 
			
		||||
            'num_classes': 1000
 | 
			
		||||
        },
 | 
			
		||||
        'imagenet+background': {
 | 
			
		||||
            'url':
 | 
			
		||||
            'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',
 | 
			
		||||
            'input_space': 'RGB',
 | 
			
		||||
            'input_size': [3, 299, 299],
 | 
			
		||||
            'input_range': [0, 1],
 | 
			
		||||
            'mean': [0.5, 0.5, 0.5],
 | 
			
		||||
            'std': [0.5, 0.5, 0.5],
 | 
			
		||||
            'num_classes': 1001
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
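
# Note: both settings point at the same checkpoint; 'imagenet+background'
# simply keeps the extra background class (1001 outputs instead of 1000).
# Inputs are expected as 299x299 RGB in [0, 1], normalized with
# mean=std=0.5, i.e. x_norm = (x - 0.5) / 0.5.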


class BasicConv2d(nn.Module):

    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=False
        )  # bias unnecessary: a BatchNorm layer follows
        self.bn = nn.BatchNorm2d(
            out_planes,
            eps=0.001,  # value found in tensorflow
            momentum=0.1,  # default pytorch value
            affine=True
        )
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

class Mixed_5b(nn.Module):

    def __init__(self):
        super(Mixed_5b, self).__init__()

        self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(192, 48, kernel_size=1, stride=1),
            BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)
        )

        self.branch2 = nn.Sequential(
            BasicConv2d(192, 64, kernel_size=1, stride=1),
            BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
            BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
        )

        self.branch3 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
            BasicConv2d(192, 64, kernel_size=1, stride=1)
        )

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        out = torch.cat((x0, x1, x2, x3), 1)
        return out

class Block35(nn.Module):

    def __init__(self, scale=1.0):
        super(Block35, self).__init__()

        self.scale = scale

        self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(320, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)
        )

        self.branch2 = nn.Sequential(
            BasicConv2d(320, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),
            BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)
        )

        self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        out = torch.cat((x0, x1, x2), 1)
        out = self.conv2d(out)
        out = out * self.scale + x
        out = self.relu(out)
        return out
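
# The `out * self.scale + x` line is the scaled residual connection from
# the Inception-ResNet paper: the residual branch is damped (scale=0.17
# for Block35, 0.10 for Block17 and 0.20 for Block8, per the
# instantiations further down) before being added back to the input,
# which stabilizes training of very deep stacks.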


class Mixed_6a(nn.Module):

    def __init__(self):
        super(Mixed_6a, self).__init__()

        self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)

        self.branch1 = nn.Sequential(
            BasicConv2d(320, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),
            BasicConv2d(256, 384, kernel_size=3, stride=2)
        )

        self.branch2 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        out = torch.cat((x0, x1, x2), 1)
        return out


class Block17(nn.Module):

    def __init__(self, scale=1.0):
        super(Block17, self).__init__()

        self.scale = scale

        self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(1088, 128, kernel_size=1, stride=1),
            BasicConv2d(
                128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)
            ),
            BasicConv2d(
                160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)
            )
        )

        self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        out = torch.cat((x0, x1), 1)
        out = self.conv2d(out)
        out = out * self.scale + x
        out = self.relu(out)
        return out


class Mixed_7a(nn.Module):

    def __init__(self):
        super(Mixed_7a, self).__init__()

        self.branch0 = nn.Sequential(
            BasicConv2d(1088, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 384, kernel_size=3, stride=2)
        )

        self.branch1 = nn.Sequential(
            BasicConv2d(1088, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 288, kernel_size=3, stride=2)
        )

        self.branch2 = nn.Sequential(
            BasicConv2d(1088, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1),
            BasicConv2d(288, 320, kernel_size=3, stride=2)
        )

        self.branch3 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        out = torch.cat((x0, x1, x2, x3), 1)
        return out


class Block8(nn.Module):

    def __init__(self, scale=1.0, noReLU=False):
        super(Block8, self).__init__()

        self.scale = scale
        self.noReLU = noReLU

        self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(2080, 192, kernel_size=1, stride=1),
            BasicConv2d(
                192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)
            ),
            BasicConv2d(
                224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)
            )
        )

        self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)
        if not self.noReLU:
            self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        out = torch.cat((x0, x1), 1)
        out = self.conv2d(out)
        out = out * self.scale + x
        if not self.noReLU:
            out = self.relu(out)
        return out

# ----------------
# Model Definition
# ----------------
class InceptionResNetV2(nn.Module):
    """Inception-ResNet-V2.

    Reference:
        Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual
        Connections on Learning. AAAI 2017.

    Public keys:
        - ``inceptionresnetv2``: Inception-ResNet-V2.
    """

    def __init__(self, num_classes, loss='softmax', **kwargs):
        super(InceptionResNetV2, self).__init__()
        self.loss = loss

        # Modules
        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
        self.conv2d_2b = BasicConv2d(
            32, 64, kernel_size=3, stride=1, padding=1
        )
        self.maxpool_3a = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
        self.maxpool_5a = nn.MaxPool2d(3, stride=2)
        self.mixed_5b = Mixed_5b()
        self.repeat = nn.Sequential(
            Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),
            Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),
            Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),
            Block35(scale=0.17)
        )
        self.mixed_6a = Mixed_6a()
        self.repeat_1 = nn.Sequential(
            Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
            Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
            Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
            Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
            Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
            Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
            Block17(scale=0.10), Block17(scale=0.10)
        )
        self.mixed_7a = Mixed_7a()
        self.repeat_2 = nn.Sequential(
            Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20),
            Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20),
            Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20)
        )

        self.block8 = Block8(noReLU=True)
        self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(1536, num_classes)

    def load_imagenet_weights(self):
        settings = pretrained_settings['inceptionresnetv2']['imagenet']
        pretrain_dict = model_zoo.load_url(settings['url'])
        model_dict = self.state_dict()
        # keep only pretrained entries whose name and shape match this model
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        self.load_state_dict(model_dict)

    def featuremaps(self, x):
        x = self.conv2d_1a(x)
        x = self.conv2d_2a(x)
        x = self.conv2d_2b(x)
        x = self.maxpool_3a(x)
        x = self.conv2d_3b(x)
        x = self.conv2d_4a(x)
        x = self.maxpool_5a(x)
        x = self.mixed_5b(x)
        x = self.repeat(x)
        x = self.mixed_6a(x)
        x = self.repeat_1(x)
        x = self.mixed_7a(x)
        x = self.repeat_2(x)
        x = self.block8(x)
        x = self.conv2d_7b(x)
        return x

    def forward(self, x):
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


def inceptionresnetv2(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = InceptionResNetV2(num_classes=num_classes, loss=loss, **kwargs)
    if pretrained:
        model.load_imagenet_weights()
    return model
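
A minimal sketch (not part of the commit) of the factory in use; the 751-class head is only an example. In eval mode the forward pass above returns the 1536-dim pooled features, and the size filter in load_imagenet_weights means the mismatched ImageNet classifier head is simply skipped.

import torch

net = inceptionresnetv2(num_classes=751, pretrained=True)
net.eval()
with torch.no_grad():
    # 299x299 RGB in [0, 1], normalized with mean=std=0.5 per pretrained_settings
    batch = (torch.rand(2, 3, 299, 299) - 0.5) / 0.5
    feats = net(batch)
print(feats.shape)  # torch.Size([2, 1536])
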
381  feeder/trackers/strongsort/deep/models/inceptionv4.py  Normal file
@@ -0,0 +1,381 @@
"""
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""
from __future__ import division, absolute_import
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo

__all__ = ['inceptionv4']

pretrained_settings = {
    'inceptionv4': {
        'imagenet': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
            'input_space': 'RGB',
            'input_size': [3, 299, 299],
            'input_range': [0, 1],
            'mean': [0.5, 0.5, 0.5],
            'std': [0.5, 0.5, 0.5],
            'num_classes': 1000
        },
        'imagenet+background': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
            'input_space': 'RGB',
            'input_size': [3, 299, 299],
            'input_range': [0, 1],
            'mean': [0.5, 0.5, 0.5],
            'std': [0.5, 0.5, 0.5],
            'num_classes': 1001
        }
    }
}

class BasicConv2d(nn.Module):

    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=False
        )  # bias unnecessary: a BatchNorm layer follows
        self.bn = nn.BatchNorm2d(
            out_planes,
            eps=0.001,  # value found in tensorflow
            momentum=0.1,  # default pytorch value
            affine=True
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class Mixed_3a(nn.Module):

    def __init__(self):
        super(Mixed_3a, self).__init__()
        self.maxpool = nn.MaxPool2d(3, stride=2)
        self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2)

    def forward(self, x):
        x0 = self.maxpool(x)
        x1 = self.conv(x)
        out = torch.cat((x0, x1), 1)
        return out
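
# Mixed_3a halves the spatial resolution with two stride-2 branches and
# concatenates them: 64 pooled channels + 96 conv channels gives the 160
# channels consumed by Mixed_4a below.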


class Mixed_4a(nn.Module):

    def __init__(self):
        super(Mixed_4a, self).__init__()

        self.branch0 = nn.Sequential(
            BasicConv2d(160, 64, kernel_size=1, stride=1),
            BasicConv2d(64, 96, kernel_size=3, stride=1)
        )

        self.branch1 = nn.Sequential(
            BasicConv2d(160, 64, kernel_size=1, stride=1),
            BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
            BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
            BasicConv2d(64, 96, kernel_size=(3, 3), stride=1)
        )

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        out = torch.cat((x0, x1), 1)
        return out


class Mixed_5a(nn.Module):

    def __init__(self):
        super(Mixed_5a, self).__init__()
        self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2)
        self.maxpool = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.conv(x)
        x1 = self.maxpool(x)
        out = torch.cat((x0, x1), 1)
        return out


class Inception_A(nn.Module):

    def __init__(self):
        super(Inception_A, self).__init__()
        self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(384, 64, kernel_size=1, stride=1),
            BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1)
        )

        self.branch2 = nn.Sequential(
            BasicConv2d(384, 64, kernel_size=1, stride=1),
            BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
            BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
        )

        self.branch3 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
            BasicConv2d(384, 96, kernel_size=1, stride=1)
        )

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        out = torch.cat((x0, x1, x2, x3), 1)
        return out


class Reduction_A(nn.Module):

    def __init__(self):
        super(Reduction_A, self).__init__()
        self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2)

        self.branch1 = nn.Sequential(
            BasicConv2d(384, 192, kernel_size=1, stride=1),
            BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1),
            BasicConv2d(224, 256, kernel_size=3, stride=2)
        )

        self.branch2 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        out = torch.cat((x0, x1, x2), 1)
        return out


class Inception_B(nn.Module):

    def __init__(self):
        super(Inception_B, self).__init__()
        self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(1024, 192, kernel_size=1, stride=1),
            BasicConv2d(
                192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)
            ),
            BasicConv2d(
                224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)
            )
        )

        self.branch2 = nn.Sequential(
            BasicConv2d(1024, 192, kernel_size=1, stride=1),
            BasicConv2d(
                192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)
            ),
            BasicConv2d(
                192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)
            ),
            BasicConv2d(
                224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)
            ),
            BasicConv2d(
                224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)
            )
        )

        self.branch3 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
            BasicConv2d(1024, 128, kernel_size=1, stride=1)
        )

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        out = torch.cat((x0, x1, x2, x3), 1)
        return out


class Reduction_B(nn.Module):

    def __init__(self):
        super(Reduction_B, self).__init__()

        self.branch0 = nn.Sequential(
            BasicConv2d(1024, 192, kernel_size=1, stride=1),
            BasicConv2d(192, 192, kernel_size=3, stride=2)
        )

        self.branch1 = nn.Sequential(
            BasicConv2d(1024, 256, kernel_size=1, stride=1),
            BasicConv2d(
                256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)
            ),
            BasicConv2d(
                256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)
            ),
            BasicConv2d(320, 320, kernel_size=3, stride=2)
        )

        self.branch2 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        out = torch.cat((x0, x1, x2), 1)
        return out


class Inception_C(nn.Module):

    def __init__(self):
        super(Inception_C, self).__init__()

        self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1)

        self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
        self.branch1_1a = BasicConv2d(
            384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)
        )
        self.branch1_1b = BasicConv2d(
            384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)
        )

        self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
        self.branch2_1 = BasicConv2d(
            384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)
        )
        self.branch2_2 = BasicConv2d(
            448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)
        )
        self.branch2_3a = BasicConv2d(
            512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)
        )
        self.branch2_3b = BasicConv2d(
            512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)
        )

        self.branch3 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
            BasicConv2d(1536, 256, kernel_size=1, stride=1)
        )

    def forward(self, x):
        x0 = self.branch0(x)

        x1_0 = self.branch1_0(x)
        x1_1a = self.branch1_1a(x1_0)
        x1_1b = self.branch1_1b(x1_0)
        x1 = torch.cat((x1_1a, x1_1b), 1)

        x2_0 = self.branch2_0(x)
        x2_1 = self.branch2_1(x2_0)
        x2_2 = self.branch2_2(x2_1)
        x2_3a = self.branch2_3a(x2_2)
        x2_3b = self.branch2_3b(x2_2)
        x2 = torch.cat((x2_3a, x2_3b), 1)

        x3 = self.branch3(x)

        out = torch.cat((x0, x1, x2, x3), 1)
        return out


class InceptionV4(nn.Module):
    """Inception-v4.

    Reference:
        Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual
        Connections on Learning. AAAI 2017.

    Public keys:
        - ``inceptionv4``: InceptionV4.
    """

    def __init__(self, num_classes, loss, **kwargs):
        super(InceptionV4, self).__init__()
        self.loss = loss

        self.features = nn.Sequential(
            BasicConv2d(3, 32, kernel_size=3, stride=2),
            BasicConv2d(32, 32, kernel_size=3, stride=1),
            BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
            Mixed_3a(),
            Mixed_4a(),
            Mixed_5a(),
            Inception_A(),
            Inception_A(),
            Inception_A(),
            Inception_A(),
            Reduction_A(),  # Mixed_6a
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Reduction_B(),  # Mixed_7a
            Inception_C(),
            Inception_C(),
            Inception_C()
        )
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(1536, num_classes)

    def forward(self, x):
        f = self.features(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


def inceptionv4(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = InceptionV4(num_classes, loss, **kwargs)
    if pretrained:
        model_url = pretrained_settings['inceptionv4']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model
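
init_pretrained_weights above silently drops checkpoint tensors whose name or shape disagrees with the model, so a re-ID head sized for a new dataset never collides with the 1000-class ImageNet classifier. A minimal sketch of that filter in isolation (not part of the commit; names are illustrative):

import torch

def filter_matching(pretrain_dict, model_dict):
    # keep only entries that exist in the model with an identical shape
    return {
        k: v for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }

net = InceptionV4(num_classes=751, loss='softmax')
ckpt = {'classifier.weight': torch.zeros(1000, 1536)}
print(filter_matching(ckpt, net.state_dict()))  # {} -- (1000, 1536) != (751, 1536)
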
269  feeder/trackers/strongsort/deep/models/mlfn.py  Normal file
@@ -0,0 +1,269 @@
from __future__ import division, absolute_import
import torch
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import functional as F

__all__ = ['mlfn']

model_urls = {
    # training epoch = 5, top1 = 51.6
    'imagenet':
    'https://mega.nz/#!YHxAhaxC!yu9E6zWl0x5zscSouTdbZu8gdFFytDdl-RAdD2DEfpk',
}


class MLFNBlock(nn.Module):

    def __init__(
        self, in_channels, out_channels, stride, fsm_channels, groups=32
    ):
        super(MLFNBlock, self).__init__()
        self.groups = groups
        mid_channels = out_channels // 2

        # Factor Modules
        self.fm_conv1 = nn.Conv2d(in_channels, mid_channels, 1, bias=False)
        self.fm_bn1 = nn.BatchNorm2d(mid_channels)
        self.fm_conv2 = nn.Conv2d(
            mid_channels,
            mid_channels,
            3,
            stride=stride,
            padding=1,
            bias=False,
            groups=self.groups
        )
        self.fm_bn2 = nn.BatchNorm2d(mid_channels)
        self.fm_conv3 = nn.Conv2d(mid_channels, out_channels, 1, bias=False)
        self.fm_bn3 = nn.BatchNorm2d(out_channels)

        # Factor Selection Module
        self.fsm = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, fsm_channels[0], 1),
            nn.BatchNorm2d(fsm_channels[0]),
            nn.ReLU(inplace=True),
            nn.Conv2d(fsm_channels[0], fsm_channels[1], 1),
            nn.BatchNorm2d(fsm_channels[1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(fsm_channels[1], self.groups, 1),
            nn.BatchNorm2d(self.groups),
            nn.Sigmoid(),
        )

        self.downsample = None
        if in_channels != out_channels or stride > 1:
            self.downsample = nn.Sequential(
                nn.Conv2d(
                    in_channels, out_channels, 1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(out_channels),
            )

    def forward(self, x):
        residual = x
        s = self.fsm(x)

        # reduce dimension
        x = self.fm_conv1(x)
        x = self.fm_bn1(x)
        x = F.relu(x, inplace=True)

        # group convolution
        x = self.fm_conv2(x)
        x = self.fm_bn2(x)
        x = F.relu(x, inplace=True)

        # factor selection
        b, c = x.size(0), x.size(1)
        n = c // self.groups
        ss = s.repeat(1, n, 1, 1)  # from (b, g, 1, 1) to (b, g*n=c, 1, 1)
        ss = ss.view(b, n, self.groups, 1, 1)
        ss = ss.permute(0, 2, 1, 3, 4).contiguous()
        ss = ss.view(b, c, 1, 1)
        x = ss * x

        # recover dimension
        x = self.fm_conv3(x)
        x = self.fm_bn3(x)
        x = F.relu(x, inplace=True)

        if self.downsample is not None:
            residual = self.downsample(residual)

        return F.relu(residual + x, inplace=True), s
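
# The repeat/view/permute sequence above rearranges the g gate values so
# that gate i multiplies every channel belonging to group i of the grouped
# convolution: (b, g, 1, 1) -> (b, c, 1, 1) with channels ordered
# group-major, matching fm_conv2's grouped channel layout.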


class MLFN(nn.Module):
    """Multi-Level Factorisation Net.

    Reference:
        Chang et al. Multi-Level Factorisation Net for
        Person Re-Identification. CVPR 2018.

    Public keys:
        - ``mlfn``: MLFN (Multi-Level Factorisation Net).
    """

    def __init__(
        self,
        num_classes,
        loss='softmax',
        groups=32,
        channels=[64, 256, 512, 1024, 2048],
        embed_dim=1024,
        **kwargs
    ):
        super(MLFN, self).__init__()
        self.loss = loss
        self.groups = groups

        # first convolutional layer
        self.conv1 = nn.Conv2d(3, channels[0], 7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(channels[0])
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        # main body
        self.feature = nn.ModuleList(
            [
                # layer 1-3
                MLFNBlock(channels[0], channels[1], 1, [128, 64], self.groups),
                MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups),
                MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups),
                # layer 4-7
                MLFNBlock(
                    channels[1], channels[2], 2, [256, 128], self.groups
                ),
                MLFNBlock(
                    channels[2], channels[2], 1, [256, 128], self.groups
                ),
                MLFNBlock(
                    channels[2], channels[2], 1, [256, 128], self.groups
                ),
                MLFNBlock(
                    channels[2], channels[2], 1, [256, 128], self.groups
                ),
                # layer 8-13
                MLFNBlock(
                    channels[2], channels[3], 2, [512, 128], self.groups
                ),
                MLFNBlock(
                    channels[3], channels[3], 1, [512, 128], self.groups
                ),
                MLFNBlock(
                    channels[3], channels[3], 1, [512, 128], self.groups
                ),
                MLFNBlock(
                    channels[3], channels[3], 1, [512, 128], self.groups
                ),
                MLFNBlock(
                    channels[3], channels[3], 1, [512, 128], self.groups
                ),
                MLFNBlock(
                    channels[3], channels[3], 1, [512, 128], self.groups
                ),
                # layer 14-16
                MLFNBlock(
                    channels[3], channels[4], 2, [512, 128], self.groups
                ),
                MLFNBlock(
                    channels[4], channels[4], 1, [512, 128], self.groups
                ),
                MLFNBlock(
                    channels[4], channels[4], 1, [512, 128], self.groups
                ),
            ]
        )
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)

        # projection functions
        self.fc_x = nn.Sequential(
            nn.Conv2d(channels[4], embed_dim, 1, bias=False),
            nn.BatchNorm2d(embed_dim),
            nn.ReLU(inplace=True),
        )
        self.fc_s = nn.Sequential(
            nn.Conv2d(self.groups * 16, embed_dim, 1, bias=False),
            nn.BatchNorm2d(embed_dim),
            nn.ReLU(inplace=True),
        )

        self.classifier = nn.Linear(embed_dim, num_classes)

        self.init_params()

    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x, inplace=True)
        x = self.maxpool(x)

        s_hat = []
        for block in self.feature:
            x, s = block(x)
            s_hat.append(s)
        s_hat = torch.cat(s_hat, 1)

        x = self.global_avgpool(x)
        x = self.fc_x(x)
        s_hat = self.fc_s(s_hat)

        v = (x + s_hat) * 0.5
        v = v.view(v.size(0), -1)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


def mlfn(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = MLFN(num_classes, loss, **kwargs)
    if pretrained:
        # init_pretrained_weights(model, model_urls['imagenet'])
        import warnings
        warnings.warn(
            'The imagenet pretrained weights need to be manually downloaded from {}'
            .format(model_urls['imagenet'])
        )
    return model
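
Because model_zoo cannot fetch from mega.nz, the factory above only warns instead of loading weights. A hedged sketch of wiring in a manually downloaded checkpoint (not part of the commit; the local filename is hypothetical and this assumes the file is a plain state_dict):

import torch

model = mlfn(num_classes=751, pretrained=False)
checkpoint = torch.load('mlfn_imagenet.pth', map_location='cpu')  # downloaded by hand
model_dict = model.state_dict()
# same name-and-shape filter used by init_pretrained_weights above
filtered = {
    k: v for k, v in checkpoint.items()
    if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(filtered)
model.load_state_dict(model_dict)
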
274  feeder/trackers/strongsort/deep/models/mobilenetv2.py  Normal file
@@ -0,0 +1,274 @@
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import functional as F

__all__ = ['mobilenetv2_x1_0', 'mobilenetv2_x1_4']

model_urls = {
    # 1.0: top-1 71.3
    'mobilenetv2_x1_0':
    'https://mega.nz/#!NKp2wAIA!1NH1pbNzY_M2hVk_hdsxNM1NUOWvvGPHhaNr-fASF6c',
    # 1.4: top-1 73.9
    'mobilenetv2_x1_4':
    'https://mega.nz/#!RGhgEIwS!xN2s2ZdyqI6vQ3EwgmRXLEW3khr9tpXg96G9SUJugGk',
}


class ConvBlock(nn.Module):
    """Basic convolutional block.

    convolution (bias discarded) + batch normalization + relu6.

    Args:
        in_c (int): number of input channels.
        out_c (int): number of output channels.
        k (int or tuple): kernel size.
        s (int or tuple): stride.
        p (int or tuple): padding.
        g (int): number of blocked connections from input channels
            to output channels (default: 1).
    """

    def __init__(self, in_c, out_c, k, s=1, p=0, g=1):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(
            in_c, out_c, k, stride=s, padding=p, bias=False, groups=g
        )
        self.bn = nn.BatchNorm2d(out_c)

    def forward(self, x):
        return F.relu6(self.bn(self.conv(x)))


class Bottleneck(nn.Module):

    def __init__(self, in_channels, out_channels, expansion_factor, stride=1):
        super(Bottleneck, self).__init__()
        mid_channels = in_channels * expansion_factor
        self.use_residual = stride == 1 and in_channels == out_channels
        self.conv1 = ConvBlock(in_channels, mid_channels, 1)
        self.dwconv2 = ConvBlock(
            mid_channels, mid_channels, 3, stride, 1, g=mid_channels
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(mid_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        m = self.conv1(x)
        m = self.dwconv2(m)
        m = self.conv3(m)
        if self.use_residual:
            return x + m
        else:
            return m
 | 
			
		||||
 | 
			
		||||
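# Note: Bottleneck above is the MobileNetV2 inverted residual. conv1 expands
# the channels by `expansion_factor`, dwconv2 filters each channel separately
# (groups == mid_channels), and conv3 projects back down with no
# non-linearity. The skip connection `x + m` is only taken when stride == 1
# and the channel count is unchanged, so the shapes line up.

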
class MobileNetV2(nn.Module):
    """MobileNetV2.

    Reference:
        Sandler et al. MobileNetV2: Inverted Residuals and
        Linear Bottlenecks. CVPR 2018.

    Public keys:
        - ``mobilenetv2_x1_0``: MobileNetV2 x1.0.
        - ``mobilenetv2_x1_4``: MobileNetV2 x1.4.
    """

    def __init__(
        self,
        num_classes,
        width_mult=1,
        loss='softmax',
        fc_dims=None,
        dropout_p=None,
        **kwargs
    ):
        super(MobileNetV2, self).__init__()
        self.loss = loss
        self.in_channels = int(32 * width_mult)
        self.feature_dim = int(1280 * width_mult) if width_mult > 1 else 1280

        # construct layers
        self.conv1 = ConvBlock(3, self.in_channels, 3, s=2, p=1)
        self.conv2 = self._make_layer(
            Bottleneck, 1, int(16 * width_mult), 1, 1
        )
        self.conv3 = self._make_layer(
            Bottleneck, 6, int(24 * width_mult), 2, 2
        )
        self.conv4 = self._make_layer(
            Bottleneck, 6, int(32 * width_mult), 3, 2
        )
        self.conv5 = self._make_layer(
            Bottleneck, 6, int(64 * width_mult), 4, 2
        )
        self.conv6 = self._make_layer(
            Bottleneck, 6, int(96 * width_mult), 3, 1
        )
        self.conv7 = self._make_layer(
            Bottleneck, 6, int(160 * width_mult), 3, 2
        )
        self.conv8 = self._make_layer(
            Bottleneck, 6, int(320 * width_mult), 1, 1
        )
        self.conv9 = ConvBlock(self.in_channels, self.feature_dim, 1)

        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = self._construct_fc_layer(
            fc_dims, self.feature_dim, dropout_p
        )
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self._init_params()

    def _make_layer(self, block, t, c, n, s):
        # t: expansion factor
        # c: output channels
        # n: number of blocks
        # s: stride for first layer
        layers = []
        layers.append(block(self.in_channels, c, t, s))
        self.in_channels = c
        for i in range(1, n):
            layers.append(block(self.in_channels, c, t))
        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer.

        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None

        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.conv8(x)
        x = self.conv9(x)
        return x

    def forward(self, x):
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if self.fc is not None:
            v = self.fc(v)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


def mobilenetv2_x1_0(num_classes, loss, pretrained=True, **kwargs):
    model = MobileNetV2(
        num_classes,
        loss=loss,
        width_mult=1,
        fc_dims=None,
        dropout_p=None,
        **kwargs
    )
    if pretrained:
        # init_pretrained_weights(model, model_urls['mobilenetv2_x1_0'])
        import warnings
        warnings.warn(
            'The imagenet pretrained weights need to be manually downloaded from {}'
            .format(model_urls['mobilenetv2_x1_0'])
        )
    return model


def mobilenetv2_x1_4(num_classes, loss, pretrained=True, **kwargs):
    model = MobileNetV2(
        num_classes,
        loss=loss,
        width_mult=1.4,
        fc_dims=None,
        dropout_p=None,
        **kwargs
    )
    if pretrained:
        # init_pretrained_weights(model, model_urls['mobilenetv2_x1_4'])
        import warnings
        warnings.warn(
            'The imagenet pretrained weights need to be manually downloaded from {}'
            .format(model_urls['mobilenetv2_x1_4'])
        )
    return model
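
A quick smoke test of the factories above; a minimal sketch, assuming the module is importable under this repo's layout:

    import torch
    from feeder.trackers.strongsort.deep.models.mobilenetv2 import mobilenetv2_x1_0  # assumed import path

    model = mobilenetv2_x1_0(num_classes=751, loss='softmax', pretrained=False)
    model.eval()  # eval mode returns the 1280-dim feature vector, not class logits
    with torch.no_grad():
        feats = model(torch.randn(2, 3, 256, 128))  # a common re-id input size
    print(feats.shape)  # torch.Size([2, 1280])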
							
								
								
									
206 feeder/trackers/strongsort/deep/models/mudeep.py Normal file
@@ -0,0 +1,206 @@
from __future__ import division, absolute_import
import torch
from torch import nn
from torch.nn import functional as F

__all__ = ['MuDeep']


class ConvBlock(nn.Module):
    """Basic convolutional block.

    convolution + batch normalization + relu.

    Args:
        in_c (int): number of input channels.
        out_c (int): number of output channels.
        k (int or tuple): kernel size.
        s (int or tuple): stride.
        p (int or tuple): padding.
    """

    def __init__(self, in_c, out_c, k, s, p):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p)
        self.bn = nn.BatchNorm2d(out_c)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)))


class ConvLayers(nn.Module):
    """Preprocessing layers."""

    def __init__(self):
        super(ConvLayers, self).__init__()
        self.conv1 = ConvBlock(3, 48, k=3, s=1, p=1)
        self.conv2 = ConvBlock(48, 96, k=3, s=1, p=1)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.maxpool(x)
        return x


class MultiScaleA(nn.Module):
    """Multi-scale stream layer A (Sec.3.1)"""

    def __init__(self):
        super(MultiScaleA, self).__init__()
        self.stream1 = nn.Sequential(
            ConvBlock(96, 96, k=1, s=1, p=0),
            ConvBlock(96, 24, k=3, s=1, p=1),
        )
        self.stream2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            ConvBlock(96, 24, k=1, s=1, p=0),
        )
        self.stream3 = ConvBlock(96, 24, k=1, s=1, p=0)
        self.stream4 = nn.Sequential(
            ConvBlock(96, 16, k=1, s=1, p=0),
            ConvBlock(16, 24, k=3, s=1, p=1),
            ConvBlock(24, 24, k=3, s=1, p=1),
        )

    def forward(self, x):
        s1 = self.stream1(x)
        s2 = self.stream2(x)
        s3 = self.stream3(x)
        s4 = self.stream4(x)
        y = torch.cat([s1, s2, s3, s4], dim=1)
        return y


class Reduction(nn.Module):
    """Reduction layer (Sec.3.1)"""

    def __init__(self):
        super(Reduction, self).__init__()
        self.stream1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.stream2 = ConvBlock(96, 96, k=3, s=2, p=1)
        self.stream3 = nn.Sequential(
            ConvBlock(96, 48, k=1, s=1, p=0),
            ConvBlock(48, 56, k=3, s=1, p=1),
            ConvBlock(56, 64, k=3, s=2, p=1),
        )

    def forward(self, x):
        s1 = self.stream1(x)
        s2 = self.stream2(x)
        s3 = self.stream3(x)
        y = torch.cat([s1, s2, s3], dim=1)
        return y


class MultiScaleB(nn.Module):
    """Multi-scale stream layer B (Sec.3.1)"""

    def __init__(self):
        super(MultiScaleB, self).__init__()
        self.stream1 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            ConvBlock(256, 256, k=1, s=1, p=0),
        )
        self.stream2 = nn.Sequential(
            ConvBlock(256, 64, k=1, s=1, p=0),
            ConvBlock(64, 128, k=(1, 3), s=1, p=(0, 1)),
            ConvBlock(128, 256, k=(3, 1), s=1, p=(1, 0)),
        )
        self.stream3 = ConvBlock(256, 256, k=1, s=1, p=0)
        self.stream4 = nn.Sequential(
            ConvBlock(256, 64, k=1, s=1, p=0),
            ConvBlock(64, 64, k=(1, 3), s=1, p=(0, 1)),
            ConvBlock(64, 128, k=(3, 1), s=1, p=(1, 0)),
            ConvBlock(128, 128, k=(1, 3), s=1, p=(0, 1)),
            ConvBlock(128, 256, k=(3, 1), s=1, p=(1, 0)),
        )

    def forward(self, x):
        s1 = self.stream1(x)
        s2 = self.stream2(x)
        s3 = self.stream3(x)
        s4 = self.stream4(x)
        return s1, s2, s3, s4


class Fusion(nn.Module):
    """Saliency-based learning fusion layer (Sec.3.2)"""

    def __init__(self):
        super(Fusion, self).__init__()
        self.a1 = nn.Parameter(torch.rand(1, 256, 1, 1))
        self.a2 = nn.Parameter(torch.rand(1, 256, 1, 1))
        self.a3 = nn.Parameter(torch.rand(1, 256, 1, 1))
        self.a4 = nn.Parameter(torch.rand(1, 256, 1, 1))

        # We add an average pooling layer to reduce the spatial dimension
        # of feature maps, which differs from the original paper.
        self.avgpool = nn.AvgPool2d(kernel_size=4, stride=4, padding=0)

    def forward(self, x1, x2, x3, x4):
        s1 = self.a1.expand_as(x1) * x1
        s2 = self.a2.expand_as(x2) * x2
        s3 = self.a3.expand_as(x3) * x3
        s4 = self.a4.expand_as(x4) * x4
        y = self.avgpool(s1 + s2 + s3 + s4)
        return y


class MuDeep(nn.Module):
    """Multiscale deep neural network.

    Reference:
        Qian et al. Multi-scale Deep Learning Architectures
        for Person Re-identification. ICCV 2017.

    Public keys:
        - ``mudeep``: Multiscale deep neural network.
    """

    def __init__(self, num_classes, loss='softmax', **kwargs):
        super(MuDeep, self).__init__()
        self.loss = loss

        self.block1 = ConvLayers()
        self.block2 = MultiScaleA()
        self.block3 = Reduction()
        self.block4 = MultiScaleB()
        self.block5 = Fusion()

        # Due to this fully connected layer, input image has to be fixed
        # in shape, i.e. (3, 256, 128), such that the last convolutional feature
        # maps are of shape (256, 16, 8). If input shape is changed,
        # the input dimension of this layer has to be changed accordingly.
        self.fc = nn.Sequential(
            nn.Linear(256 * 16 * 8, 4096),
            nn.BatchNorm1d(4096),
            nn.ReLU(),
        )
        self.classifier = nn.Linear(4096, num_classes)
        self.feat_dim = 4096

    def featuremaps(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(*x)
        return x

    def forward(self, x):
        x = self.featuremaps(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        y = self.classifier(x)

        if not self.training:
            return x

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, x
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))
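
Because of the hard-coded nn.Linear(256 * 16 * 8, 4096), MuDeep only accepts (3, 256, 128) inputs, as the comment in __init__ notes. A minimal sketch, assuming the module is importable under this repo's layout:

    import torch
    from feeder.trackers.strongsort.deep.models.mudeep import MuDeep  # assumed import path

    model = MuDeep(num_classes=751, loss='softmax')
    model.eval()  # eval mode returns the 4096-dim fc embedding, not logits
    with torch.no_grad():
        feats = model(torch.randn(2, 3, 256, 128))  # input shape is fixed by the fc layer
    print(feats.shape)  # torch.Size([2, 4096])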
							
								
								
									
1131 feeder/trackers/strongsort/deep/models/nasnet.py Normal file
(file diff suppressed because it is too large)
598 feeder/trackers/strongsort/deep/models/osnet.py Normal file
@@ -0,0 +1,598 @@
from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F

__all__ = [
    'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]

pretrained_urls = {
    'osnet_x1_0':
    'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
    'osnet_x0_75':
    'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
    'osnet_x0_5':
    'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
    'osnet_x0_25':
    'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
    'osnet_ibn_x1_0':
    'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}


##########
# Basic layers
##########
class ConvLayer(nn.Module):
    """Convolution layer (conv + bn + relu)."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        groups=1,
        IN=False
    ):
        super(ConvLayer, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=False,
            groups=groups
        )
        if IN:
            self.bn = nn.InstanceNorm2d(out_channels, affine=True)
        else:
            self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class Conv1x1(nn.Module):
    """1x1 convolution + bn + relu."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv1x1, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            1,
            stride=stride,
            padding=0,
            bias=False,
            groups=groups
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class Conv1x1Linear(nn.Module):
    """1x1 convolution + bn (w/o non-linearity)."""

    def __init__(self, in_channels, out_channels, stride=1):
        super(Conv1x1Linear, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1, stride=stride, padding=0, bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return x


class Conv3x3(nn.Module):
    """3x3 convolution + bn + relu."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv3x3, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            3,
            stride=stride,
            padding=1,
            bias=False,
            groups=groups
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class LightConv3x3(nn.Module):
    """Lightweight 3x3 convolution.

    1x1 (linear) + dw 3x3 (nonlinear).
    """

    def __init__(self, in_channels, out_channels):
        super(LightConv3x3, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, 1, stride=1, padding=0, bias=False
        )
        self.conv2 = nn.Conv2d(
            out_channels,
            out_channels,
            3,
            stride=1,
            padding=1,
            bias=False,
            groups=out_channels
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
    """A mini-network that generates channel-wise gates conditioned on input tensor."""

    def __init__(
        self,
        in_channels,
        num_gates=None,
        return_gates=False,
        gate_activation='sigmoid',
        reduction=16,
        layer_norm=False
    ):
        super(ChannelGate, self).__init__()
        if num_gates is None:
            num_gates = in_channels
        self.return_gates = return_gates
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(
            in_channels,
            in_channels // reduction,
            kernel_size=1,
            bias=True,
            padding=0
        )
        self.norm1 = None
        if layer_norm:
            self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(
            in_channels // reduction,
            num_gates,
            kernel_size=1,
            bias=True,
            padding=0
        )
        if gate_activation == 'sigmoid':
            self.gate_activation = nn.Sigmoid()
        elif gate_activation == 'relu':
            self.gate_activation = nn.ReLU(inplace=True)
        elif gate_activation == 'linear':
            self.gate_activation = None
        else:
            raise RuntimeError(
                "Unknown gate activation: {}".format(gate_activation)
            )

    def forward(self, x):
        input = x
        x = self.global_avgpool(x)
        x = self.fc1(x)
        if self.norm1 is not None:
            x = self.norm1(x)
        x = self.relu(x)
        x = self.fc2(x)
        if self.gate_activation is not None:
            x = self.gate_activation(x)
        if self.return_gates:
            return x
        return input * x


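# Note: ChannelGate is a squeeze-and-excitation style gate. For an
# (N, C, H, W) input it pools to (N, C, 1, 1), runs the fc1 -> relu -> fc2
# bottleneck (C -> C // reduction -> num_gates), applies the chosen
# activation, and broadcasts the result back over H and W via `input * x`,
# re-weighting every channel of the input.

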
class OSBlock(nn.Module):
    """Omni-scale feature learning block."""

    def __init__(
        self,
        in_channels,
        out_channels,
        IN=False,
        bottleneck_reduction=4,
        **kwargs
    ):
        super(OSBlock, self).__init__()
        mid_channels = out_channels // bottleneck_reduction
        self.conv1 = Conv1x1(in_channels, mid_channels)
        self.conv2a = LightConv3x3(mid_channels, mid_channels)
        self.conv2b = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.conv2c = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.conv2d = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.gate = ChannelGate(mid_channels)
        self.conv3 = Conv1x1Linear(mid_channels, out_channels)
        self.downsample = None
        if in_channels != out_channels:
            self.downsample = Conv1x1Linear(in_channels, out_channels)
        self.IN = None
        if IN:
            self.IN = nn.InstanceNorm2d(out_channels, affine=True)

    def forward(self, x):
        identity = x
        x1 = self.conv1(x)
        x2a = self.conv2a(x1)
        x2b = self.conv2b(x1)
        x2c = self.conv2c(x1)
        x2d = self.conv2d(x1)
        x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d)
        x3 = self.conv3(x2)
        if self.downsample is not None:
            identity = self.downsample(identity)
        out = x3 + identity
        if self.IN is not None:
            out = self.IN(out)
        return F.relu(out)


##########
# Network architecture
##########
class OSNet(nn.Module):
    """Omni-Scale Network.

    Reference:
        - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
        - Zhou et al. Learning Generalisable Omni-Scale Representations
          for Person Re-Identification. TPAMI, 2021.
    """

    def __init__(
        self,
        num_classes,
        blocks,
        layers,
        channels,
        feature_dim=512,
        loss='softmax',
        IN=False,
        **kwargs
    ):
        super(OSNet, self).__init__()
        num_blocks = len(blocks)
        assert num_blocks == len(layers)
        assert num_blocks == len(channels) - 1
        self.loss = loss
        self.feature_dim = feature_dim

        # convolutional backbone
        self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.conv2 = self._make_layer(
            blocks[0],
            layers[0],
            channels[0],
            channels[1],
            reduce_spatial_size=True,
            IN=IN
        )
        self.conv3 = self._make_layer(
            blocks[1],
            layers[1],
            channels[1],
            channels[2],
            reduce_spatial_size=True
        )
        self.conv4 = self._make_layer(
            blocks[2],
            layers[2],
            channels[2],
            channels[3],
            reduce_spatial_size=False
        )
        self.conv5 = Conv1x1(channels[3], channels[3])
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # fully connected layer
        self.fc = self._construct_fc_layer(
            self.feature_dim, channels[3], dropout_p=None
        )
        # identity classification layer
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self._init_params()

    def _make_layer(
        self,
        block,
        layer,
        in_channels,
        out_channels,
        reduce_spatial_size,
        IN=False
    ):
        layers = []

        layers.append(block(in_channels, out_channels, IN=IN))
        for i in range(1, layer):
            layers.append(block(out_channels, out_channels, IN=IN))

        if reduce_spatial_size:
            layers.append(
                nn.Sequential(
                    Conv1x1(out_channels, out_channels),
                    nn.AvgPool2d(2, stride=2)
                )
            )

        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        if fc_dims is None or fc_dims < 0:
            self.feature_dim = input_dim
            return None

        if isinstance(fc_dims, int):
            fc_dims = [fc_dims]

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        return x

    def forward(self, x, return_featuremaps=False):
        x = self.featuremaps(x)
        if return_featuremaps:
            return x
        v = self.global_avgpool(x)
        v = v.view(v.size(0), -1)
        if self.fc is not None:
            v = self.fc(v)
        if not self.training:
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))


def init_pretrained_weights(model, key=''):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    import os
    import errno
    import gdown
    from collections import OrderedDict

    def _get_torch_home():
        ENV_TORCH_HOME = 'TORCH_HOME'
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
        DEFAULT_CACHE_DIR = '~/.cache'
        torch_home = os.path.expanduser(
            os.getenv(
                ENV_TORCH_HOME,
                os.path.join(
                    os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
                )
            )
        )
        return torch_home

    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, 'checkpoints')
    try:
        os.makedirs(model_dir)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # Directory already exists, ignore.
            pass
        else:
            # Unexpected OSError, re-raise.
            raise
    filename = key + '_imagenet.pth'
    cached_file = os.path.join(model_dir, filename)

    if not os.path.exists(cached_file):
        gdown.download(pretrained_urls[key], cached_file, quiet=False)

    state_dict = torch.load(cached_file)
    model_dict = model.state_dict()
    new_state_dict = OrderedDict()
    matched_layers, discarded_layers = [], []

    for k, v in state_dict.items():
        if k.startswith('module.'):
            k = k[7:]  # discard module.

        if k in model_dict and model_dict[k].size() == v.size():
            new_state_dict[k] = v
            matched_layers.append(k)
        else:
            discarded_layers.append(k)

    model_dict.update(new_state_dict)
    model.load_state_dict(model_dict)

    if len(matched_layers) == 0:
        warnings.warn(
            'The pretrained weights from "{}" cannot be loaded, '
            'please check the key names manually '
            '(** ignored and continue **)'.format(cached_file)
        )
    else:
        print(
            'Successfully loaded imagenet pretrained weights from "{}"'.
            format(cached_file)
        )
        if len(discarded_layers) > 0:
            print(
                '** The following layers are discarded '
                'due to unmatched keys or layer size: {}'.
                format(discarded_layers)
            )


##########
# Instantiation
##########
def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # standard size (width x1.0)
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
        loss=loss,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, key='osnet_x1_0')
    return model


def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # medium size (width x0.75)
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[48, 192, 288, 384],
        loss=loss,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, key='osnet_x0_75')
    return model


def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # tiny size (width x0.5)
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[32, 128, 192, 256],
        loss=loss,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, key='osnet_x0_5')
    return model


def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # very tiny size (width x0.25)
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[16, 64, 96, 128],
        loss=loss,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, key='osnet_x0_25')
    return model


def osnet_ibn_x1_0(
    num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
    # standard size (width x1.0) + IBN layer
    # Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018.
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
        loss=loss,
        IN=True,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, key='osnet_ibn_x1_0')
    return model
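
Unlike the MEGA-hosted weights in the other model files, the OSNet checkpoints are downloaded automatically through gdown into $TORCH_HOME/checkpoints. A minimal usage sketch, assuming the module is importable under this repo's layout (pretrained=True additionally needs network access and the gdown package):

    import torch
    from feeder.trackers.strongsort.deep.models.osnet import osnet_x1_0  # assumed import path

    model = osnet_x1_0(num_classes=751, pretrained=False)  # True would auto-download via gdown
    model.eval()  # eval mode returns the 512-dim embedding used for re-id matching
    with torch.no_grad():
        feats = model(torch.randn(2, 3, 256, 128))
    print(feats.shape)  # torch.Size([2, 512])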
							
								
								
									
609 feeder/trackers/strongsort/deep/models/osnet_ain.py Normal file
@@ -0,0 +1,609 @@
from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F

__all__ = [
    'osnet_ain_x1_0', 'osnet_ain_x0_75', 'osnet_ain_x0_5', 'osnet_ain_x0_25'
]

pretrained_urls = {
    'osnet_ain_x1_0':
    'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo',
    'osnet_ain_x0_75':
    'https://drive.google.com/uc?id=1apy0hpsMypqstfencdH-jKIUEFOW4xoM',
    'osnet_ain_x0_5':
    'https://drive.google.com/uc?id=1KusKvEYyKGDTUBVRxRiz55G31wkihB6l',
    'osnet_ain_x0_25':
    'https://drive.google.com/uc?id=1SxQt2AvmEcgWNhaRb2xC4rP6ZwVDP0Wt'
}


##########
# Basic layers
##########
class ConvLayer(nn.Module):
    """Convolution layer (conv + bn + relu)."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        groups=1,
        IN=False
    ):
        super(ConvLayer, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=False,
            groups=groups
        )
        if IN:
            self.bn = nn.InstanceNorm2d(out_channels, affine=True)
        else:
            self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return self.relu(x)


class Conv1x1(nn.Module):
    """1x1 convolution + bn + relu."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv1x1, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            1,
            stride=stride,
            padding=0,
            bias=False,
            groups=groups
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return self.relu(x)


class Conv1x1Linear(nn.Module):
    """1x1 convolution + bn (w/o non-linearity)."""

    def __init__(self, in_channels, out_channels, stride=1, bn=True):
        super(Conv1x1Linear, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1, stride=stride, padding=0, bias=False
        )
        self.bn = None
        if bn:
            self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        return x


class Conv3x3(nn.Module):
    """3x3 convolution + bn + relu."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv3x3, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            3,
            stride=stride,
            padding=1,
            bias=False,
            groups=groups
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return self.relu(x)


class LightConv3x3(nn.Module):
    """Lightweight 3x3 convolution.

    1x1 (linear) + dw 3x3 (nonlinear).
    """

    def __init__(self, in_channels, out_channels):
        super(LightConv3x3, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, 1, stride=1, padding=0, bias=False
        )
        self.conv2 = nn.Conv2d(
            out_channels,
            out_channels,
            3,
            stride=1,
            padding=1,
            bias=False,
            groups=out_channels
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.bn(x)
        return self.relu(x)


class LightConvStream(nn.Module):
    """Lightweight convolution stream."""

    def __init__(self, in_channels, out_channels, depth):
        super(LightConvStream, self).__init__()
        assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format(
            depth
        )
        layers = []
        layers += [LightConv3x3(in_channels, out_channels)]
        for i in range(depth - 1):
            layers += [LightConv3x3(out_channels, out_channels)]
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
    """A mini-network that generates channel-wise gates conditioned on input tensor."""

    def __init__(
        self,
        in_channels,
        num_gates=None,
        return_gates=False,
        gate_activation='sigmoid',
        reduction=16,
        layer_norm=False
    ):
        super(ChannelGate, self).__init__()
        if num_gates is None:
            num_gates = in_channels
        self.return_gates = return_gates
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(
            in_channels,
            in_channels // reduction,
            kernel_size=1,
            bias=True,
            padding=0
        )
        self.norm1 = None
        if layer_norm:
            self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
        self.relu = nn.ReLU()
        self.fc2 = nn.Conv2d(
            in_channels // reduction,
            num_gates,
            kernel_size=1,
            bias=True,
            padding=0
        )
        if gate_activation == 'sigmoid':
            self.gate_activation = nn.Sigmoid()
        elif gate_activation == 'relu':
            self.gate_activation = nn.ReLU()
        elif gate_activation == 'linear':
            self.gate_activation = None
        else:
            raise RuntimeError(
                "Unknown gate activation: {}".format(gate_activation)
            )

    def forward(self, x):
        input = x
        x = self.global_avgpool(x)
        x = self.fc1(x)
        if self.norm1 is not None:
            x = self.norm1(x)
        x = self.relu(x)
        x = self.fc2(x)
        if self.gate_activation is not None:
            x = self.gate_activation(x)
        if self.return_gates:
            return x
        return input * x


class OSBlock(nn.Module):
    """Omni-scale feature learning block."""

    def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
        super(OSBlock, self).__init__()
        assert T >= 1
        assert out_channels >= reduction and out_channels % reduction == 0
        mid_channels = out_channels // reduction

        self.conv1 = Conv1x1(in_channels, mid_channels)
        self.conv2 = nn.ModuleList()
        for t in range(1, T + 1):
            self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
        self.gate = ChannelGate(mid_channels)
        self.conv3 = Conv1x1Linear(mid_channels, out_channels)
        self.downsample = None
        if in_channels != out_channels:
            self.downsample = Conv1x1Linear(in_channels, out_channels)

    def forward(self, x):
        identity = x
        x1 = self.conv1(x)
        x2 = 0
        for conv2_t in self.conv2:
            x2_t = conv2_t(x1)
            x2 = x2 + self.gate(x2_t)
        x3 = self.conv3(x2)
        if self.downsample is not None:
            identity = self.downsample(identity)
        out = x3 + identity
        return F.relu(out)


class OSBlockINin(nn.Module):
    """Omni-scale feature learning block with instance normalization."""

    def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
        super(OSBlockINin, self).__init__()
        assert T >= 1
        assert out_channels >= reduction and out_channels % reduction == 0
        mid_channels = out_channels // reduction

        self.conv1 = Conv1x1(in_channels, mid_channels)
        self.conv2 = nn.ModuleList()
        for t in range(1, T + 1):
            self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
        self.gate = ChannelGate(mid_channels)
        self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False)
        self.downsample = None
        if in_channels != out_channels:
            self.downsample = Conv1x1Linear(in_channels, out_channels)
        self.IN = nn.InstanceNorm2d(out_channels, affine=True)

    def forward(self, x):
        identity = x
        x1 = self.conv1(x)
        x2 = 0
        for conv2_t in self.conv2:
            x2_t = conv2_t(x1)
            x2 = x2 + self.gate(x2_t)
 | 
			
		||||
        x3 = self.conv3(x2)
 | 
			
		||||
        x3 = self.IN(x3) # IN inside residual
 | 
			
		||||
        if self.downsample is not None:
 | 
			
		||||
            identity = self.downsample(identity)
 | 
			
		||||
        out = x3 + identity
 | 
			
		||||
        return F.relu(out)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##########
 | 
			
		||||
# Network architecture
 | 
			
		||||
##########
 | 
			
		||||
class OSNet(nn.Module):
 | 
			
		||||
    """Omni-Scale Network.
 | 
			
		||||
    
 | 
			
		||||
    Reference:
 | 
			
		||||
        - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
 | 
			
		||||
        - Zhou et al. Learning Generalisable Omni-Scale Representations
 | 
			
		||||
          for Person Re-Identification. TPAMI, 2021.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        num_classes,
 | 
			
		||||
        blocks,
 | 
			
		||||
        layers,
 | 
			
		||||
        channels,
 | 
			
		||||
        feature_dim=512,
 | 
			
		||||
        loss='softmax',
 | 
			
		||||
        conv1_IN=False,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    ):
 | 
			
		||||
        super(OSNet, self).__init__()
 | 
			
		||||
        num_blocks = len(blocks)
 | 
			
		||||
        assert num_blocks == len(layers)
 | 
			
		||||
        assert num_blocks == len(channels) - 1
 | 
			
		||||
        self.loss = loss
 | 
			
		||||
        self.feature_dim = feature_dim
 | 
			
		||||
 | 
			
		||||
        # convolutional backbone
 | 
			
		||||
        self.conv1 = ConvLayer(
 | 
			
		||||
            3, channels[0], 7, stride=2, padding=3, IN=conv1_IN
 | 
			
		||||
        )
 | 
			
		||||
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
 | 
			
		||||
        self.conv2 = self._make_layer(
 | 
			
		||||
            blocks[0], layers[0], channels[0], channels[1]
 | 
			
		||||
        )
 | 
			
		||||
        self.pool2 = nn.Sequential(
 | 
			
		||||
            Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2)
 | 
			
		||||
        )
 | 
			
		||||
        self.conv3 = self._make_layer(
 | 
			
		||||
            blocks[1], layers[1], channels[1], channels[2]
 | 
			
		||||
        )
 | 
			
		||||
        self.pool3 = nn.Sequential(
 | 
			
		||||
            Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2)
 | 
			
		||||
        )
 | 
			
		||||
        self.conv4 = self._make_layer(
 | 
			
		||||
            blocks[2], layers[2], channels[2], channels[3]
 | 
			
		||||
        )
 | 
			
		||||
        self.conv5 = Conv1x1(channels[3], channels[3])
 | 
			
		||||
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
 | 
			
		||||
        # fully connected layer
 | 
			
		||||
        self.fc = self._construct_fc_layer(
 | 
			
		||||
            self.feature_dim, channels[3], dropout_p=None
 | 
			
		||||
        )
 | 
			
		||||
        # identity classification layer
 | 
			
		||||
        self.classifier = nn.Linear(self.feature_dim, num_classes)
 | 
			
		||||
 | 
			
		||||
        self._init_params()
 | 
			
		||||
 | 
			
		||||
    def _make_layer(self, blocks, layer, in_channels, out_channels):
 | 
			
		||||
        layers = []
 | 
			
		||||
        layers += [blocks[0](in_channels, out_channels)]
 | 
			
		||||
        for i in range(1, len(blocks)):
 | 
			
		||||
            layers += [blocks[i](out_channels, out_channels)]
 | 
			
		||||
        return nn.Sequential(*layers)
 | 
			
		||||
 | 
			
		||||
    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
 | 
			
		||||
        if fc_dims is None or fc_dims < 0:
 | 
			
		||||
            self.feature_dim = input_dim
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
        if isinstance(fc_dims, int):
 | 
			
		||||
            fc_dims = [fc_dims]
 | 
			
		||||
 | 
			
		||||
        layers = []
 | 
			
		||||
        for dim in fc_dims:
 | 
			
		||||
            layers.append(nn.Linear(input_dim, dim))
 | 
			
		||||
            layers.append(nn.BatchNorm1d(dim))
 | 
			
		||||
            layers.append(nn.ReLU())
 | 
			
		||||
            if dropout_p is not None:
 | 
			
		||||
                layers.append(nn.Dropout(p=dropout_p))
 | 
			
		||||
            input_dim = dim
 | 
			
		||||
 | 
			
		||||
        self.feature_dim = fc_dims[-1]
 | 
			
		||||
 | 
			
		||||
        return nn.Sequential(*layers)
 | 
			
		||||
 | 
			
		||||
    def _init_params(self):
 | 
			
		||||
        for m in self.modules():
 | 
			
		||||
            if isinstance(m, nn.Conv2d):
 | 
			
		||||
                nn.init.kaiming_normal_(
 | 
			
		||||
                    m.weight, mode='fan_out', nonlinearity='relu'
 | 
			
		||||
                )
 | 
			
		||||
                if m.bias is not None:
 | 
			
		||||
                    nn.init.constant_(m.bias, 0)
 | 
			
		||||
 | 
			
		||||
            elif isinstance(m, nn.BatchNorm2d):
 | 
			
		||||
                nn.init.constant_(m.weight, 1)
 | 
			
		||||
                nn.init.constant_(m.bias, 0)
 | 
			
		||||
 | 
			
		||||
            elif isinstance(m, nn.BatchNorm1d):
 | 
			
		||||
                nn.init.constant_(m.weight, 1)
 | 
			
		||||
                nn.init.constant_(m.bias, 0)
 | 
			
		||||
 | 
			
		||||
            elif isinstance(m, nn.InstanceNorm2d):
 | 
			
		||||
                nn.init.constant_(m.weight, 1)
 | 
			
		||||
                nn.init.constant_(m.bias, 0)
 | 
			
		||||
 | 
			
		||||
            elif isinstance(m, nn.Linear):
 | 
			
		||||
                nn.init.normal_(m.weight, 0, 0.01)
 | 
			
		||||
                if m.bias is not None:
 | 
			
		||||
                    nn.init.constant_(m.bias, 0)
 | 
			
		||||
 | 
			
		||||
    def featuremaps(self, x):
 | 
			
		||||
        x = self.conv1(x)
 | 
			
		||||
        x = self.maxpool(x)
 | 
			
		||||
        x = self.conv2(x)
 | 
			
		||||
        x = self.pool2(x)
 | 
			
		||||
        x = self.conv3(x)
 | 
			
		||||
        x = self.pool3(x)
 | 
			
		||||
        x = self.conv4(x)
 | 
			
		||||
        x = self.conv5(x)
 | 
			
		||||
        return x
 | 
			
		||||
 | 
			
		||||
    def forward(self, x, return_featuremaps=False):
 | 
			
		||||
        x = self.featuremaps(x)
 | 
			
		||||
        if return_featuremaps:
 | 
			
		||||
            return x
 | 
			
		||||
        v = self.global_avgpool(x)
 | 
			
		||||
        v = v.view(v.size(0), -1)
 | 
			
		||||
        if self.fc is not None:
 | 
			
		||||
            v = self.fc(v)
 | 
			
		||||
        if not self.training:
 | 
			
		||||
            return v
 | 
			
		||||
        y = self.classifier(v)
 | 
			
		||||
        if self.loss == 'softmax':
 | 
			
		||||
            return y
 | 
			
		||||
        elif self.loss == 'triplet':
 | 
			
		||||
            return y, v
 | 
			
		||||
        else:
 | 
			
		||||
            raise KeyError("Unsupported loss: {}".format(self.loss))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def init_pretrained_weights(model, key=''):
 | 
			
		||||
    """Initializes model with pretrained weights.
 | 
			
		||||
    
 | 
			
		||||
    Layers that don't match with pretrained layers in name or size are kept unchanged.
 | 
			
		||||
    """
 | 
			
		||||
    import os
 | 
			
		||||
    import errno
 | 
			
		||||
    import gdown
 | 
			
		||||
    from collections import OrderedDict
 | 
			
		||||
 | 
			
		||||
    def _get_torch_home():
 | 
			
		||||
        ENV_TORCH_HOME = 'TORCH_HOME'
 | 
			
		||||
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
 | 
			
		||||
        DEFAULT_CACHE_DIR = '~/.cache'
 | 
			
		||||
        torch_home = os.path.expanduser(
 | 
			
		||||
            os.getenv(
 | 
			
		||||
                ENV_TORCH_HOME,
 | 
			
		||||
                os.path.join(
 | 
			
		||||
                    os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
 | 
			
		||||
                )
 | 
			
		||||
            )
 | 
			
		||||
        )
 | 
			
		||||
        return torch_home
 | 
			
		||||
 | 
			
		||||
    torch_home = _get_torch_home()
 | 
			
		||||
    model_dir = os.path.join(torch_home, 'checkpoints')
 | 
			
		||||
    try:
 | 
			
		||||
        os.makedirs(model_dir)
 | 
			
		||||
    except OSError as e:
 | 
			
		||||
        if e.errno == errno.EEXIST:
 | 
			
		||||
            # Directory already exists, ignore.
 | 
			
		||||
            pass
 | 
			
		||||
        else:
 | 
			
		||||
            # Unexpected OSError, re-raise.
 | 
			
		||||
            raise
 | 
			
		||||
    filename = key + '_imagenet.pth'
 | 
			
		||||
    cached_file = os.path.join(model_dir, filename)
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(cached_file):
 | 
			
		||||
        gdown.download(pretrained_urls[key], cached_file, quiet=False)
 | 
			
		||||
 | 
			
		||||
    state_dict = torch.load(cached_file)
 | 
			
		||||
    model_dict = model.state_dict()
 | 
			
		||||
    new_state_dict = OrderedDict()
 | 
			
		||||
    matched_layers, discarded_layers = [], []
 | 
			
		||||
 | 
			
		||||
    for k, v in state_dict.items():
 | 
			
		||||
        if k.startswith('module.'):
 | 
			
		||||
            k = k[7:] # discard module.
 | 
			
		||||
 | 
			
		||||
        if k in model_dict and model_dict[k].size() == v.size():
 | 
			
		||||
            new_state_dict[k] = v
 | 
			
		||||
            matched_layers.append(k)
 | 
			
		||||
        else:
 | 
			
		||||
            discarded_layers.append(k)
 | 
			
		||||
 | 
			
		||||
    model_dict.update(new_state_dict)
 | 
			
		||||
    model.load_state_dict(model_dict)
 | 
			
		||||
 | 
			
		||||
    if len(matched_layers) == 0:
 | 
			
		||||
        warnings.warn(
 | 
			
		||||
            'The pretrained weights from "{}" cannot be loaded, '
 | 
			
		||||
            'please check the key names manually '
 | 
			
		||||
            '(** ignored and continue **)'.format(cached_file)
 | 
			
		||||
        )
 | 
			
		||||
    else:
 | 
			
		||||
        print(
 | 
			
		||||
            'Successfully loaded imagenet pretrained weights from "{}"'.
 | 
			
		||||
            format(cached_file)
 | 
			
		||||
        )
 | 
			
		||||
        if len(discarded_layers) > 0:
 | 
			
		||||
            print(
 | 
			
		||||
                '** The following layers are discarded '
 | 
			
		||||
                'due to unmatched keys or layer size: {}'.
 | 
			
		||||
                format(discarded_layers)
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##########
 | 
			
		||||
# Instantiation
 | 
			
		||||
##########
 | 
			
		||||
def osnet_ain_x1_0(
 | 
			
		||||
    num_classes=1000, pretrained=True, loss='softmax', **kwargs
 | 
			
		||||
):
 | 
			
		||||
    model = OSNet(
 | 
			
		||||
        num_classes,
 | 
			
		||||
        blocks=[
 | 
			
		||||
            [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
 | 
			
		||||
            [OSBlockINin, OSBlock]
 | 
			
		||||
        ],
 | 
			
		||||
        layers=[2, 2, 2],
 | 
			
		||||
        channels=[64, 256, 384, 512],
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        conv1_IN=True,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, key='osnet_ain_x1_0')
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def osnet_ain_x0_75(
 | 
			
		||||
    num_classes=1000, pretrained=True, loss='softmax', **kwargs
 | 
			
		||||
):
 | 
			
		||||
    model = OSNet(
 | 
			
		||||
        num_classes,
 | 
			
		||||
        blocks=[
 | 
			
		||||
            [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
 | 
			
		||||
            [OSBlockINin, OSBlock]
 | 
			
		||||
        ],
 | 
			
		||||
        layers=[2, 2, 2],
 | 
			
		||||
        channels=[48, 192, 288, 384],
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        conv1_IN=True,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, key='osnet_ain_x0_75')
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def osnet_ain_x0_5(
 | 
			
		||||
    num_classes=1000, pretrained=True, loss='softmax', **kwargs
 | 
			
		||||
):
 | 
			
		||||
    model = OSNet(
 | 
			
		||||
        num_classes,
 | 
			
		||||
        blocks=[
 | 
			
		||||
            [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
 | 
			
		||||
            [OSBlockINin, OSBlock]
 | 
			
		||||
        ],
 | 
			
		||||
        layers=[2, 2, 2],
 | 
			
		||||
        channels=[32, 128, 192, 256],
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        conv1_IN=True,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, key='osnet_ain_x0_5')
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def osnet_ain_x0_25(
 | 
			
		||||
    num_classes=1000, pretrained=True, loss='softmax', **kwargs
 | 
			
		||||
):
 | 
			
		||||
    model = OSNet(
 | 
			
		||||
        num_classes,
 | 
			
		||||
        blocks=[
 | 
			
		||||
            [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
 | 
			
		||||
            [OSBlockINin, OSBlock]
 | 
			
		||||
        ],
 | 
			
		||||
        layers=[2, 2, 2],
 | 
			
		||||
        channels=[16, 64, 96, 128],
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        conv1_IN=True,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, key='osnet_ain_x0_25')
 | 
			
		||||
    return model
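
A minimal usage sketch for the factory functions above (illustrative only, not part of the commit; the import path is hypothetical, and pretrained=False skips the gdown download in init_pretrained_weights). In eval mode OSNet's forward() returns the pooled 512-d embedding rather than logits, which is what an appearance-based tracker like StrongSORT consumes:

import torch

from osnet_ain import osnet_ain_x1_0  # hypothetical import path

model = osnet_ain_x1_0(num_classes=1000, pretrained=False)
model.eval()  # in eval mode forward() returns features, not logits

with torch.no_grad():
    crops = torch.randn(8, 3, 256, 128)  # a batch of person crops, NCHW
    feats = model(crops)                 # -> (8, 512) appearance embeddings
print(feats.shape)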
314  feeder/trackers/strongsort/deep/models/pcb.py  Normal file
@@ -0,0 +1,314 @@
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import functional as F

__all__ = ['pcb_p6', 'pcb_p4']

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False
    )


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False
        )
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class DimReduceLayer(nn.Module):

    def __init__(self, in_channels, out_channels, nonlinear):
        super(DimReduceLayer, self).__init__()
        layers = []
        layers.append(
            nn.Conv2d(
                in_channels, out_channels, 1, stride=1, padding=0, bias=False
            )
        )
        layers.append(nn.BatchNorm2d(out_channels))

        if nonlinear == 'relu':
            layers.append(nn.ReLU(inplace=True))
        elif nonlinear == 'leakyrelu':
            layers.append(nn.LeakyReLU(0.1))

        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


class PCB(nn.Module):
    """Part-based Convolutional Baseline.

    Reference:
        Sun et al. Beyond Part Models: Person Retrieval with Refined
        Part Pooling (and A Strong Convolutional Baseline). ECCV 2018.

    Public keys:
        - ``pcb_p4``: PCB with 4-part strips.
        - ``pcb_p6``: PCB with 6-part strips.
    """

    def __init__(
        self,
        num_classes,
        loss,
        block,
        layers,
        parts=6,
        reduced_dim=256,
        nonlinear='relu',
        **kwargs
    ):
        self.inplanes = 64
        super(PCB, self).__init__()
        self.loss = loss
        self.parts = parts
        self.feature_dim = 512 * block.expansion

        # backbone network
        self.conv1 = nn.Conv2d(
            3, 64, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1)

        # pcb layers
        self.parts_avgpool = nn.AdaptiveAvgPool2d((self.parts, 1))
        self.dropout = nn.Dropout(p=0.5)
        self.conv5 = DimReduceLayer(
            512 * block.expansion, reduced_dim, nonlinear=nonlinear
        )
        self.feature_dim = reduced_dim
        self.classifier = nn.ModuleList(
            [
                nn.Linear(self.feature_dim, num_classes)
                for _ in range(self.parts)
            ]
        )

        self._init_params()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        f = self.featuremaps(x)
        v_g = self.parts_avgpool(f)

        if not self.training:
            v_g = F.normalize(v_g, p=2, dim=1)
            return v_g.view(v_g.size(0), -1)

        v_g = self.dropout(v_g)
        v_h = self.conv5(v_g)

        y = []
        for i in range(self.parts):
            v_h_i = v_h[:, :, i, :]
            v_h_i = v_h_i.view(v_h_i.size(0), -1)
            y_i = self.classifier[i](v_h_i)
            y.append(y_i)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            v_g = F.normalize(v_g, p=2, dim=1)
            return y, v_g.view(v_g.size(0), -1)
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


def pcb_p6(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = PCB(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 4, 6, 3],
        last_stride=1,
        parts=6,
        reduced_dim=256,
        nonlinear='relu',
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet50'])
    return model


def pcb_p4(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = PCB(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 4, 6, 3],
        last_stride=1,
        parts=4,
        reduced_dim=256,
        nonlinear='relu',
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet50'])
    return model
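
A minimal sketch of the two PCB output modes defined above (illustrative, not part of the commit; the import path is hypothetical). Training mode returns one logits tensor per horizontal part strip; eval mode returns the L2-normalized, flattened part features (2048 channels x 6 parts = 12288-d for pcb_p6):

import torch

from pcb import pcb_p6  # hypothetical import path

model = pcb_p6(num_classes=751, pretrained=False)

model.train()
y = model(torch.randn(4, 3, 384, 128))
print(len(y), y[0].shape)  # 6 part classifiers -> six (4, 751) logit tensors

model.eval()
with torch.no_grad():
    v = model(torch.randn(4, 3, 384, 128))
print(v.shape)  # torch.Size([4, 12288])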
530  feeder/trackers/strongsort/deep/models/resnet.py  Normal file
@@ -0,0 +1,530 @@
"""
 | 
			
		||||
Code source: https://github.com/pytorch/vision
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import division, absolute_import
 | 
			
		||||
import torch.utils.model_zoo as model_zoo
 | 
			
		||||
from torch import nn
 | 
			
		||||
 | 
			
		||||
__all__ = [
 | 
			
		||||
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
 | 
			
		||||
    'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512'
 | 
			
		||||
]
 | 
			
		||||
 | 
			
		||||
model_urls = {
 | 
			
		||||
    'resnet18':
 | 
			
		||||
    'https://download.pytorch.org/models/resnet18-5c106cde.pth',
 | 
			
		||||
    'resnet34':
 | 
			
		||||
    'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
 | 
			
		||||
    'resnet50':
 | 
			
		||||
    'https://download.pytorch.org/models/resnet50-19c8e357.pth',
 | 
			
		||||
    'resnet101':
 | 
			
		||||
    'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
 | 
			
		||||
    'resnet152':
 | 
			
		||||
    'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
 | 
			
		||||
    'resnext50_32x4d':
 | 
			
		||||
    'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
 | 
			
		||||
    'resnext101_32x8d':
 | 
			
		||||
    'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
 | 
			
		||||
    """3x3 convolution with padding"""
 | 
			
		||||
    return nn.Conv2d(
 | 
			
		||||
        in_planes,
 | 
			
		||||
        out_planes,
 | 
			
		||||
        kernel_size=3,
 | 
			
		||||
        stride=stride,
 | 
			
		||||
        padding=dilation,
 | 
			
		||||
        groups=groups,
 | 
			
		||||
        bias=False,
 | 
			
		||||
        dilation=dilation
 | 
			
		||||
    )
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def conv1x1(in_planes, out_planes, stride=1):
 | 
			
		||||
    """1x1 convolution"""
 | 
			
		||||
    return nn.Conv2d(
 | 
			
		||||
        in_planes, out_planes, kernel_size=1, stride=stride, bias=False
 | 
			
		||||
    )
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BasicBlock(nn.Module):
 | 
			
		||||
    expansion = 1
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        inplanes,
 | 
			
		||||
        planes,
 | 
			
		||||
        stride=1,
 | 
			
		||||
        downsample=None,
 | 
			
		||||
        groups=1,
 | 
			
		||||
        base_width=64,
 | 
			
		||||
        dilation=1,
 | 
			
		||||
        norm_layer=None
 | 
			
		||||
    ):
 | 
			
		||||
        super(BasicBlock, self).__init__()
 | 
			
		||||
        if norm_layer is None:
 | 
			
		||||
            norm_layer = nn.BatchNorm2d
 | 
			
		||||
        if groups != 1 or base_width != 64:
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                'BasicBlock only supports groups=1 and base_width=64'
 | 
			
		||||
            )
 | 
			
		||||
        if dilation > 1:
 | 
			
		||||
            raise NotImplementedError(
 | 
			
		||||
                "Dilation > 1 not supported in BasicBlock"
 | 
			
		||||
            )
 | 
			
		||||
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
 | 
			
		||||
        self.conv1 = conv3x3(inplanes, planes, stride)
 | 
			
		||||
        self.bn1 = norm_layer(planes)
 | 
			
		||||
        self.relu = nn.ReLU(inplace=True)
 | 
			
		||||
        self.conv2 = conv3x3(planes, planes)
 | 
			
		||||
        self.bn2 = norm_layer(planes)
 | 
			
		||||
        self.downsample = downsample
 | 
			
		||||
        self.stride = stride
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        identity = x
 | 
			
		||||
 | 
			
		||||
        out = self.conv1(x)
 | 
			
		||||
        out = self.bn1(out)
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        out = self.conv2(out)
 | 
			
		||||
        out = self.bn2(out)
 | 
			
		||||
 | 
			
		||||
        if self.downsample is not None:
 | 
			
		||||
            identity = self.downsample(x)
 | 
			
		||||
 | 
			
		||||
        out += identity
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        return out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Bottleneck(nn.Module):
 | 
			
		||||
    expansion = 4
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        inplanes,
 | 
			
		||||
        planes,
 | 
			
		||||
        stride=1,
 | 
			
		||||
        downsample=None,
 | 
			
		||||
        groups=1,
 | 
			
		||||
        base_width=64,
 | 
			
		||||
        dilation=1,
 | 
			
		||||
        norm_layer=None
 | 
			
		||||
    ):
 | 
			
		||||
        super(Bottleneck, self).__init__()
 | 
			
		||||
        if norm_layer is None:
 | 
			
		||||
            norm_layer = nn.BatchNorm2d
 | 
			
		||||
        width = int(planes * (base_width/64.)) * groups
 | 
			
		||||
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
 | 
			
		||||
        self.conv1 = conv1x1(inplanes, width)
 | 
			
		||||
        self.bn1 = norm_layer(width)
 | 
			
		||||
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
 | 
			
		||||
        self.bn2 = norm_layer(width)
 | 
			
		||||
        self.conv3 = conv1x1(width, planes * self.expansion)
 | 
			
		||||
        self.bn3 = norm_layer(planes * self.expansion)
 | 
			
		||||
        self.relu = nn.ReLU(inplace=True)
 | 
			
		||||
        self.downsample = downsample
 | 
			
		||||
        self.stride = stride
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        identity = x
 | 
			
		||||
 | 
			
		||||
        out = self.conv1(x)
 | 
			
		||||
        out = self.bn1(out)
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        out = self.conv2(out)
 | 
			
		||||
        out = self.bn2(out)
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        out = self.conv3(out)
 | 
			
		||||
        out = self.bn3(out)
 | 
			
		||||
 | 
			
		||||
        if self.downsample is not None:
 | 
			
		||||
            identity = self.downsample(x)
 | 
			
		||||
 | 
			
		||||
        out += identity
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        return out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ResNet(nn.Module):
 | 
			
		||||
    """Residual network.
 | 
			
		||||
    
 | 
			
		||||
    Reference:
 | 
			
		||||
        - He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
 | 
			
		||||
        - Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.
 | 
			
		||||
 | 
			
		||||
    Public keys:
 | 
			
		||||
        - ``resnet18``: ResNet18.
 | 
			
		||||
        - ``resnet34``: ResNet34.
 | 
			
		||||
        - ``resnet50``: ResNet50.
 | 
			
		||||
        - ``resnet101``: ResNet101.
 | 
			
		||||
        - ``resnet152``: ResNet152.
 | 
			
		||||
        - ``resnext50_32x4d``: ResNeXt50.
 | 
			
		||||
        - ``resnext101_32x8d``: ResNeXt101.
 | 
			
		||||
        - ``resnet50_fc512``: ResNet50 + FC.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        num_classes,
 | 
			
		||||
        loss,
 | 
			
		||||
        block,
 | 
			
		||||
        layers,
 | 
			
		||||
        zero_init_residual=False,
 | 
			
		||||
        groups=1,
 | 
			
		||||
        width_per_group=64,
 | 
			
		||||
        replace_stride_with_dilation=None,
 | 
			
		||||
        norm_layer=None,
 | 
			
		||||
        last_stride=2,
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    ):
 | 
			
		||||
        super(ResNet, self).__init__()
 | 
			
		||||
        if norm_layer is None:
 | 
			
		||||
            norm_layer = nn.BatchNorm2d
 | 
			
		||||
        self._norm_layer = norm_layer
 | 
			
		||||
        self.loss = loss
 | 
			
		||||
        self.feature_dim = 512 * block.expansion
 | 
			
		||||
        self.inplanes = 64
 | 
			
		||||
        self.dilation = 1
 | 
			
		||||
        if replace_stride_with_dilation is None:
 | 
			
		||||
            # each element in the tuple indicates if we should replace
 | 
			
		||||
            # the 2x2 stride with a dilated convolution instead
 | 
			
		||||
            replace_stride_with_dilation = [False, False, False]
 | 
			
		||||
        if len(replace_stride_with_dilation) != 3:
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "replace_stride_with_dilation should be None "
 | 
			
		||||
                "or a 3-element tuple, got {}".
 | 
			
		||||
                format(replace_stride_with_dilation)
 | 
			
		||||
            )
 | 
			
		||||
        self.groups = groups
 | 
			
		||||
        self.base_width = width_per_group
 | 
			
		||||
        self.conv1 = nn.Conv2d(
 | 
			
		||||
            3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
 | 
			
		||||
        )
 | 
			
		||||
        self.bn1 = norm_layer(self.inplanes)
 | 
			
		||||
        self.relu = nn.ReLU(inplace=True)
 | 
			
		||||
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
 | 
			
		||||
        self.layer1 = self._make_layer(block, 64, layers[0])
 | 
			
		||||
        self.layer2 = self._make_layer(
 | 
			
		||||
            block,
 | 
			
		||||
            128,
 | 
			
		||||
            layers[1],
 | 
			
		||||
            stride=2,
 | 
			
		||||
            dilate=replace_stride_with_dilation[0]
 | 
			
		||||
        )
 | 
			
		||||
        self.layer3 = self._make_layer(
 | 
			
		||||
            block,
 | 
			
		||||
            256,
 | 
			
		||||
            layers[2],
 | 
			
		||||
            stride=2,
 | 
			
		||||
            dilate=replace_stride_with_dilation[1]
 | 
			
		||||
        )
 | 
			
		||||
        self.layer4 = self._make_layer(
 | 
			
		||||
            block,
 | 
			
		||||
            512,
 | 
			
		||||
            layers[3],
 | 
			
		||||
            stride=last_stride,
 | 
			
		||||
            dilate=replace_stride_with_dilation[2]
 | 
			
		||||
        )
 | 
			
		||||
        self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
 | 
			
		||||
        self.fc = self._construct_fc_layer(
 | 
			
		||||
            fc_dims, 512 * block.expansion, dropout_p
 | 
			
		||||
        )
 | 
			
		||||
        self.classifier = nn.Linear(self.feature_dim, num_classes)
 | 
			
		||||
 | 
			
		||||
        self._init_params()
 | 
			
		||||
 | 
			
		||||
        # Zero-initialize the last BN in each residual branch,
 | 
			
		||||
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
 | 
			
		||||
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
 | 
			
		||||
        if zero_init_residual:
 | 
			
		||||
            for m in self.modules():
 | 
			
		||||
                if isinstance(m, Bottleneck):
 | 
			
		||||
                    nn.init.constant_(m.bn3.weight, 0)
 | 
			
		||||
                elif isinstance(m, BasicBlock):
 | 
			
		||||
                    nn.init.constant_(m.bn2.weight, 0)
 | 
			
		||||
 | 
			
		||||
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
 | 
			
		||||
        norm_layer = self._norm_layer
 | 
			
		||||
        downsample = None
 | 
			
		||||
        previous_dilation = self.dilation
 | 
			
		||||
        if dilate:
 | 
			
		||||
            self.dilation *= stride
 | 
			
		||||
            stride = 1
 | 
			
		||||
        if stride != 1 or self.inplanes != planes * block.expansion:
 | 
			
		||||
            downsample = nn.Sequential(
 | 
			
		||||
                conv1x1(self.inplanes, planes * block.expansion, stride),
 | 
			
		||||
                norm_layer(planes * block.expansion),
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        layers = []
 | 
			
		||||
        layers.append(
 | 
			
		||||
            block(
 | 
			
		||||
                self.inplanes, planes, stride, downsample, self.groups,
 | 
			
		||||
                self.base_width, previous_dilation, norm_layer
 | 
			
		||||
            )
 | 
			
		||||
        )
 | 
			
		||||
        self.inplanes = planes * block.expansion
 | 
			
		||||
        for _ in range(1, blocks):
 | 
			
		||||
            layers.append(
 | 
			
		||||
                block(
 | 
			
		||||
                    self.inplanes,
 | 
			
		||||
                    planes,
 | 
			
		||||
                    groups=self.groups,
 | 
			
		||||
                    base_width=self.base_width,
 | 
			
		||||
                    dilation=self.dilation,
 | 
			
		||||
                    norm_layer=norm_layer
 | 
			
		||||
                )
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        return nn.Sequential(*layers)
 | 
			
		||||
 | 
			
		||||
    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
 | 
			
		||||
        """Constructs fully connected layer
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
 | 
			
		||||
            input_dim (int): input dimension
 | 
			
		||||
            dropout_p (float): dropout probability, if None, dropout is unused
 | 
			
		||||
        """
 | 
			
		||||
        if fc_dims is None:
 | 
			
		||||
            self.feature_dim = input_dim
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
        assert isinstance(
 | 
			
		||||
            fc_dims, (list, tuple)
 | 
			
		||||
        ), 'fc_dims must be either list or tuple, but got {}'.format(
 | 
			
		||||
            type(fc_dims)
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        layers = []
 | 
			
		||||
        for dim in fc_dims:
 | 
			
		||||
            layers.append(nn.Linear(input_dim, dim))
 | 
			
		||||
            layers.append(nn.BatchNorm1d(dim))
 | 
			
		||||
            layers.append(nn.ReLU(inplace=True))
 | 
			
		||||
            if dropout_p is not None:
 | 
			
		||||
                layers.append(nn.Dropout(p=dropout_p))
 | 
			
		||||
            input_dim = dim
 | 
			
		||||
 | 
			
		||||
        self.feature_dim = fc_dims[-1]
 | 
			
		||||
 | 
			
		||||
        return nn.Sequential(*layers)
 | 
			
		||||
 | 
			
		||||
    def _init_params(self):
 | 
			
		||||
        for m in self.modules():
 | 
			
		||||
            if isinstance(m, nn.Conv2d):
 | 
			
		||||
                nn.init.kaiming_normal_(
 | 
			
		||||
                    m.weight, mode='fan_out', nonlinearity='relu'
 | 
			
		||||
                )
 | 
			
		||||
                if m.bias is not None:
 | 
			
		||||
                    nn.init.constant_(m.bias, 0)
 | 
			
		||||
            elif isinstance(m, nn.BatchNorm2d):
 | 
			
		||||
                nn.init.constant_(m.weight, 1)
 | 
			
		||||
                nn.init.constant_(m.bias, 0)
 | 
			
		||||
            elif isinstance(m, nn.BatchNorm1d):
 | 
			
		||||
                nn.init.constant_(m.weight, 1)
 | 
			
		||||
                nn.init.constant_(m.bias, 0)
 | 
			
		||||
            elif isinstance(m, nn.Linear):
 | 
			
		||||
                nn.init.normal_(m.weight, 0, 0.01)
 | 
			
		||||
                if m.bias is not None:
 | 
			
		||||
                    nn.init.constant_(m.bias, 0)
 | 
			
		||||
 | 
			
		||||
    def featuremaps(self, x):
 | 
			
		||||
        x = self.conv1(x)
 | 
			
		||||
        x = self.bn1(x)
 | 
			
		||||
        x = self.relu(x)
 | 
			
		||||
        x = self.maxpool(x)
 | 
			
		||||
        x = self.layer1(x)
 | 
			
		||||
        x = self.layer2(x)
 | 
			
		||||
        x = self.layer3(x)
 | 
			
		||||
        x = self.layer4(x)
 | 
			
		||||
        return x
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        f = self.featuremaps(x)
 | 
			
		||||
        v = self.global_avgpool(f)
 | 
			
		||||
        v = v.view(v.size(0), -1)
 | 
			
		||||
 | 
			
		||||
        if self.fc is not None:
 | 
			
		||||
            v = self.fc(v)
 | 
			
		||||
 | 
			
		||||
        if not self.training:
 | 
			
		||||
            return v
 | 
			
		||||
 | 
			
		||||
        y = self.classifier(v)
 | 
			
		||||
 | 
			
		||||
        if self.loss == 'softmax':
 | 
			
		||||
            return y
 | 
			
		||||
        elif self.loss == 'triplet':
 | 
			
		||||
            return y, v
 | 
			
		||||
        else:
 | 
			
		||||
            raise KeyError("Unsupported loss: {}".format(self.loss))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def init_pretrained_weights(model, model_url):
 | 
			
		||||
    """Initializes model with pretrained weights.
 | 
			
		||||
    
 | 
			
		||||
    Layers that don't match with pretrained layers in name or size are kept unchanged.
 | 
			
		||||
    """
 | 
			
		||||
    pretrain_dict = model_zoo.load_url(model_url)
 | 
			
		||||
    model_dict = model.state_dict()
 | 
			
		||||
    pretrain_dict = {
 | 
			
		||||
        k: v
 | 
			
		||||
        for k, v in pretrain_dict.items()
 | 
			
		||||
        if k in model_dict and model_dict[k].size() == v.size()
 | 
			
		||||
    }
 | 
			
		||||
    model_dict.update(pretrain_dict)
 | 
			
		||||
    model.load_state_dict(model_dict)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
"""ResNet"""
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        num_classes=num_classes,
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        block=BasicBlock,
 | 
			
		||||
        layers=[2, 2, 2, 2],
 | 
			
		||||
        last_stride=2,
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnet18'])
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        num_classes=num_classes,
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        block=BasicBlock,
 | 
			
		||||
        layers=[3, 4, 6, 3],
 | 
			
		||||
        last_stride=2,
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnet34'])
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        num_classes=num_classes,
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        block=Bottleneck,
 | 
			
		||||
        layers=[3, 4, 6, 3],
 | 
			
		||||
        last_stride=2,
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnet50'])
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        num_classes=num_classes,
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        block=Bottleneck,
 | 
			
		||||
        layers=[3, 4, 23, 3],
 | 
			
		||||
        last_stride=2,
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnet101'])
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        num_classes=num_classes,
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        block=Bottleneck,
 | 
			
		||||
        layers=[3, 8, 36, 3],
 | 
			
		||||
        last_stride=2,
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnet152'])
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
"""ResNeXt"""
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        num_classes=num_classes,
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        block=Bottleneck,
 | 
			
		||||
        layers=[3, 4, 6, 3],
 | 
			
		||||
        last_stride=2,
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        groups=32,
 | 
			
		||||
        width_per_group=4,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnext50_32x4d'])
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        num_classes=num_classes,
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        block=Bottleneck,
 | 
			
		||||
        layers=[3, 4, 23, 3],
 | 
			
		||||
        last_stride=2,
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        groups=32,
 | 
			
		||||
        width_per_group=8,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnext101_32x8d'])
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
"""
 | 
			
		||||
ResNet + FC
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        num_classes=num_classes,
 | 
			
		||||
        loss=loss,
 | 
			
		||||
        block=Bottleneck,
 | 
			
		||||
        layers=[3, 4, 6, 3],
 | 
			
		||||
        last_stride=1,
 | 
			
		||||
        fc_dims=[512],
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnet50'])
 | 
			
		||||
    return model
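
For completeness, a small sketch contrasting resnet50 with resnet50_fc512 above (illustrative, not part of the commit; the import path is hypothetical). fc_dims=[512] appends a Linear-BN-ReLU head after global average pooling, so eval-mode features are 512-d instead of the raw 2048-d Bottleneck output:

import torch

from resnet import resnet50, resnet50_fc512  # hypothetical import path

plain = resnet50(num_classes=751, pretrained=False).eval()
fc512 = resnet50_fc512(num_classes=751, pretrained=False).eval()

with torch.no_grad():
    x = torch.randn(2, 3, 256, 128)
    print(plain(x).shape)  # torch.Size([2, 2048])
    print(fc512(x).shape)  # torch.Size([2, 512])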
289  feeder/trackers/strongsort/deep/models/resnet_ibn_a.py  Normal file
@@ -0,0 +1,289 @@
"""
 | 
			
		||||
Credit to https://github.com/XingangPan/IBN-Net.
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import division, absolute_import
 | 
			
		||||
import math
 | 
			
		||||
import torch
 | 
			
		||||
import torch.nn as nn
 | 
			
		||||
import torch.utils.model_zoo as model_zoo
 | 
			
		||||
 | 
			
		||||
__all__ = ['resnet50_ibn_a']
 | 
			
		||||
 | 
			
		||||
model_urls = {
 | 
			
		||||
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
 | 
			
		||||
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
 | 
			
		||||
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def conv3x3(in_planes, out_planes, stride=1):
 | 
			
		||||
    "3x3 convolution with padding"
 | 
			
		||||
    return nn.Conv2d(
 | 
			
		||||
        in_planes,
 | 
			
		||||
        out_planes,
 | 
			
		||||
        kernel_size=3,
 | 
			
		||||
        stride=stride,
 | 
			
		||||
        padding=1,
 | 
			
		||||
        bias=False
 | 
			
		||||
    )
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BasicBlock(nn.Module):
 | 
			
		||||
    expansion = 1
 | 
			
		||||
 | 
			
		||||
    def __init__(self, inplanes, planes, stride=1, downsample=None):
 | 
			
		||||
        super(BasicBlock, self).__init__()
 | 
			
		||||
        self.conv1 = conv3x3(inplanes, planes, stride)
 | 
			
		||||
        self.bn1 = nn.BatchNorm2d(planes)
 | 
			
		||||
        self.relu = nn.ReLU(inplace=True)
 | 
			
		||||
        self.conv2 = conv3x3(planes, planes)
 | 
			
		||||
        self.bn2 = nn.BatchNorm2d(planes)
 | 
			
		||||
        self.downsample = downsample
 | 
			
		||||
        self.stride = stride
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        residual = x
 | 
			
		||||
 | 
			
		||||
        out = self.conv1(x)
 | 
			
		||||
        out = self.bn1(out)
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        out = self.conv2(out)
 | 
			
		||||
        out = self.bn2(out)
 | 
			
		||||
 | 
			
		||||
        if self.downsample is not None:
 | 
			
		||||
            residual = self.downsample(x)
 | 
			
		||||
 | 
			
		||||
        out += residual
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        return out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class IBN(nn.Module):
 | 
			
		||||
 | 
			
		||||
    def __init__(self, planes):
 | 
			
		||||
        super(IBN, self).__init__()
 | 
			
		||||
        half1 = int(planes / 2)
 | 
			
		||||
        self.half = half1
 | 
			
		||||
        half2 = planes - half1
 | 
			
		||||
        self.IN = nn.InstanceNorm2d(half1, affine=True)
 | 
			
		||||
        self.BN = nn.BatchNorm2d(half2)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        split = torch.split(x, self.half, 1)
 | 
			
		||||
        out1 = self.IN(split[0].contiguous())
 | 
			
		||||
        out2 = self.BN(split[1].contiguous())
 | 
			
		||||
        out = torch.cat((out1, out2), 1)
 | 
			
		||||
        return out
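
Note: a minimal sketch (not part of the commit) of what the IBN block above does, assuming its definition is in scope. The channel split is an even halving, so for 64 planes the first 32 channels are instance-normalized (style-invariant) and the last 32 batch-normalized, and the output shape matches the input.

# Hypothetical check of the IBN block defined above.
import torch

ibn = IBN(planes=64)
x = torch.randn(8, 64, 56, 56)   # NCHW feature map
y = ibn(x)                       # IN on channels 0-31, BN on channels 32-63
assert y.shape == x.shape        # IBN is shape-preserving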


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, ibn=False, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        if ibn:
            self.bn1 = IBN(planes)
        else:
            self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False
        )
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    """Residual network + IBN layer.

    Reference:
        - He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
        - Pan et al. Two at Once: Enhancing Learning and Generalization
          Capacities via IBN-Net. ECCV 2018.
    """

    def __init__(
        self,
        block,
        layers,
        num_classes=1000,
        loss='softmax',
        fc_dims=None,
        dropout_p=None,
        **kwargs
    ):
        scale = 64
        self.inplanes = scale
        super(ResNet, self).__init__()
        self.loss = loss
        self.feature_dim = scale * 8 * block.expansion

        self.conv1 = nn.Conv2d(
            3, scale, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = nn.BatchNorm2d(scale)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, scale, layers[0])
        self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = self._construct_fc_layer(
            fc_dims, scale * 8 * block.expansion, dropout_p
        )
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.InstanceNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        ibn = True
        if planes == 512:
            ibn = False
        layers.append(block(self.inplanes, planes, ibn, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, ibn))

        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer

        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None

        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def featuremaps(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        f = self.featuremaps(x)
        v = self.avgpool(f)
        v = v.view(v.size(0), -1)
        if self.fc is not None:
            v = self.fc(v)
        if not self.training:
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


def resnet50_ibn_a(num_classes, loss='softmax', pretrained=False, **kwargs):
    model = ResNet(
        Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet50'])
    return model
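
Note: a minimal usage sketch (not part of the commit) for the IBN-a variant above. With fc_dims=None the feature dimension is scale * 8 * Bottleneck.expansion = 64 * 8 * 4 = 2048, and in eval mode forward() returns that 2048-d embedding; num_classes=751 is only an example value.

# Hypothetical usage sketch for resnet50_ibn_a defined above.
import torch

net = resnet50_ibn_a(num_classes=751, loss='softmax', pretrained=False)
net.eval()  # eval mode: forward() returns the embedding v, not logits
with torch.no_grad():
    v = net(torch.randn(2, 3, 256, 128))
print(v.shape)  # torch.Size([2, 2048])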

274 feeder/trackers/strongsort/deep/models/resnet_ibn_b.py Normal file
@@ -0,0 +1,274 @@
"""
 | 
			
		||||
Credit to https://github.com/XingangPan/IBN-Net.
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import division, absolute_import
 | 
			
		||||
import math
 | 
			
		||||
import torch.nn as nn
 | 
			
		||||
import torch.utils.model_zoo as model_zoo
 | 
			
		||||
 | 
			
		||||
__all__ = ['resnet50_ibn_b']
 | 
			
		||||
 | 
			
		||||
model_urls = {
 | 
			
		||||
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
 | 
			
		||||
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
 | 
			
		||||
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def conv3x3(in_planes, out_planes, stride=1):
 | 
			
		||||
    "3x3 convolution with padding"
 | 
			
		||||
    return nn.Conv2d(
 | 
			
		||||
        in_planes,
 | 
			
		||||
        out_planes,
 | 
			
		||||
        kernel_size=3,
 | 
			
		||||
        stride=stride,
 | 
			
		||||
        padding=1,
 | 
			
		||||
        bias=False
 | 
			
		||||
    )
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BasicBlock(nn.Module):
 | 
			
		||||
    expansion = 1
 | 
			
		||||
 | 
			
		||||
    def __init__(self, inplanes, planes, stride=1, downsample=None):
 | 
			
		||||
        super(BasicBlock, self).__init__()
 | 
			
		||||
        self.conv1 = conv3x3(inplanes, planes, stride)
 | 
			
		||||
        self.bn1 = nn.BatchNorm2d(planes)
 | 
			
		||||
        self.relu = nn.ReLU(inplace=True)
 | 
			
		||||
        self.conv2 = conv3x3(planes, planes)
 | 
			
		||||
        self.bn2 = nn.BatchNorm2d(planes)
 | 
			
		||||
        self.downsample = downsample
 | 
			
		||||
        self.stride = stride
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        residual = x
 | 
			
		||||
 | 
			
		||||
        out = self.conv1(x)
 | 
			
		||||
        out = self.bn1(out)
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        out = self.conv2(out)
 | 
			
		||||
        out = self.bn2(out)
 | 
			
		||||
 | 
			
		||||
        if self.downsample is not None:
 | 
			
		||||
            residual = self.downsample(x)
 | 
			
		||||
 | 
			
		||||
        out += residual
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        return out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Bottleneck(nn.Module):
 | 
			
		||||
    expansion = 4
 | 
			
		||||
 | 
			
		||||
    def __init__(self, inplanes, planes, stride=1, downsample=None, IN=False):
 | 
			
		||||
        super(Bottleneck, self).__init__()
 | 
			
		||||
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
 | 
			
		||||
        self.bn1 = nn.BatchNorm2d(planes)
 | 
			
		||||
        self.conv2 = nn.Conv2d(
 | 
			
		||||
            planes,
 | 
			
		||||
            planes,
 | 
			
		||||
            kernel_size=3,
 | 
			
		||||
            stride=stride,
 | 
			
		||||
            padding=1,
 | 
			
		||||
            bias=False
 | 
			
		||||
        )
 | 
			
		||||
        self.bn2 = nn.BatchNorm2d(planes)
 | 
			
		||||
        self.conv3 = nn.Conv2d(
 | 
			
		||||
            planes, planes * self.expansion, kernel_size=1, bias=False
 | 
			
		||||
        )
 | 
			
		||||
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
 | 
			
		||||
        self.IN = None
 | 
			
		||||
        if IN:
 | 
			
		||||
            self.IN = nn.InstanceNorm2d(planes * 4, affine=True)
 | 
			
		||||
        self.relu = nn.ReLU(inplace=True)
 | 
			
		||||
        self.downsample = downsample
 | 
			
		||||
        self.stride = stride
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        residual = x
 | 
			
		||||
 | 
			
		||||
        out = self.conv1(x)
 | 
			
		||||
        out = self.bn1(out)
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        out = self.conv2(out)
 | 
			
		||||
        out = self.bn2(out)
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        out = self.conv3(out)
 | 
			
		||||
        out = self.bn3(out)
 | 
			
		||||
 | 
			
		||||
        if self.downsample is not None:
 | 
			
		||||
            residual = self.downsample(x)
 | 
			
		||||
 | 
			
		||||
        out += residual
 | 
			
		||||
        if self.IN is not None:
 | 
			
		||||
            out = self.IN(out)
 | 
			
		||||
        out = self.relu(out)
 | 
			
		||||
 | 
			
		||||
        return out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ResNet(nn.Module):
 | 
			
		||||
    """Residual network + IBN layer.
 | 
			
		||||
    
 | 
			
		||||
    Reference:
 | 
			
		||||
        - He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
 | 
			
		||||
        - Pan et al. Two at Once: Enhancing Learning and Generalization
 | 
			
		||||
          Capacities via IBN-Net. ECCV 2018.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        block,
 | 
			
		||||
        layers,
 | 
			
		||||
        num_classes=1000,
 | 
			
		||||
        loss='softmax',
 | 
			
		||||
        fc_dims=None,
 | 
			
		||||
        dropout_p=None,
 | 
			
		||||
        **kwargs
 | 
			
		||||
    ):
 | 
			
		||||
        scale = 64
 | 
			
		||||
        self.inplanes = scale
 | 
			
		||||
        super(ResNet, self).__init__()
 | 
			
		||||
        self.loss = loss
 | 
			
		||||
        self.feature_dim = scale * 8 * block.expansion
 | 
			
		||||
 | 
			
		||||
        self.conv1 = nn.Conv2d(
 | 
			
		||||
            3, scale, kernel_size=7, stride=2, padding=3, bias=False
 | 
			
		||||
        )
 | 
			
		||||
        self.bn1 = nn.InstanceNorm2d(scale, affine=True)
 | 
			
		||||
        self.relu = nn.ReLU(inplace=True)
 | 
			
		||||
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
 | 
			
		||||
        self.layer1 = self._make_layer(
 | 
			
		||||
            block, scale, layers[0], stride=1, IN=True
 | 
			
		||||
        )
 | 
			
		||||
        self.layer2 = self._make_layer(
 | 
			
		||||
            block, scale * 2, layers[1], stride=2, IN=True
 | 
			
		||||
        )
 | 
			
		||||
        self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2)
 | 
			
		||||
        self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2)
 | 
			
		||||
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
 | 
			
		||||
        self.fc = self._construct_fc_layer(
 | 
			
		||||
            fc_dims, scale * 8 * block.expansion, dropout_p
 | 
			
		||||
        )
 | 
			
		||||
        self.classifier = nn.Linear(self.feature_dim, num_classes)
 | 
			
		||||
 | 
			
		||||
        for m in self.modules():
 | 
			
		||||
            if isinstance(m, nn.Conv2d):
 | 
			
		||||
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
 | 
			
		||||
                m.weight.data.normal_(0, math.sqrt(2. / n))
 | 
			
		||||
            elif isinstance(m, nn.BatchNorm2d):
 | 
			
		||||
                m.weight.data.fill_(1)
 | 
			
		||||
                m.bias.data.zero_()
 | 
			
		||||
            elif isinstance(m, nn.InstanceNorm2d):
 | 
			
		||||
                m.weight.data.fill_(1)
 | 
			
		||||
                m.bias.data.zero_()
 | 
			
		||||
 | 
			
		||||
    def _make_layer(self, block, planes, blocks, stride=1, IN=False):
 | 
			
		||||
        downsample = None
 | 
			
		||||
        if stride != 1 or self.inplanes != planes * block.expansion:
 | 
			
		||||
            downsample = nn.Sequential(
 | 
			
		||||
                nn.Conv2d(
 | 
			
		||||
                    self.inplanes,
 | 
			
		||||
                    planes * block.expansion,
 | 
			
		||||
                    kernel_size=1,
 | 
			
		||||
                    stride=stride,
 | 
			
		||||
                    bias=False
 | 
			
		||||
                ),
 | 
			
		||||
                nn.BatchNorm2d(planes * block.expansion),
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        layers = []
 | 
			
		||||
        layers.append(block(self.inplanes, planes, stride, downsample))
 | 
			
		||||
        self.inplanes = planes * block.expansion
 | 
			
		||||
        for i in range(1, blocks - 1):
 | 
			
		||||
            layers.append(block(self.inplanes, planes))
 | 
			
		||||
        layers.append(block(self.inplanes, planes, IN=IN))
 | 
			
		||||
 | 
			
		||||
        return nn.Sequential(*layers)
 | 
			
		||||
 | 
			
		||||
    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
 | 
			
		||||
        """Constructs fully connected layer
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
 | 
			
		||||
            input_dim (int): input dimension
 | 
			
		||||
            dropout_p (float): dropout probability, if None, dropout is unused
 | 
			
		||||
        """
 | 
			
		||||
        if fc_dims is None:
 | 
			
		||||
            self.feature_dim = input_dim
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
        assert isinstance(
 | 
			
		||||
            fc_dims, (list, tuple)
 | 
			
		||||
        ), 'fc_dims must be either list or tuple, but got {}'.format(
 | 
			
		||||
            type(fc_dims)
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        layers = []
 | 
			
		||||
        for dim in fc_dims:
 | 
			
		||||
            layers.append(nn.Linear(input_dim, dim))
 | 
			
		||||
            layers.append(nn.BatchNorm1d(dim))
 | 
			
		||||
            layers.append(nn.ReLU(inplace=True))
 | 
			
		||||
            if dropout_p is not None:
 | 
			
		||||
                layers.append(nn.Dropout(p=dropout_p))
 | 
			
		||||
            input_dim = dim
 | 
			
		||||
 | 
			
		||||
        self.feature_dim = fc_dims[-1]
 | 
			
		||||
 | 
			
		||||
        return nn.Sequential(*layers)
 | 
			
		||||
 | 
			
		||||
    def featuremaps(self, x):
 | 
			
		||||
        x = self.conv1(x)
 | 
			
		||||
        x = self.bn1(x)
 | 
			
		||||
        x = self.relu(x)
 | 
			
		||||
        x = self.maxpool(x)
 | 
			
		||||
        x = self.layer1(x)
 | 
			
		||||
        x = self.layer2(x)
 | 
			
		||||
        x = self.layer3(x)
 | 
			
		||||
        x = self.layer4(x)
 | 
			
		||||
        return x
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        f = self.featuremaps(x)
 | 
			
		||||
        v = self.avgpool(f)
 | 
			
		||||
        v = v.view(v.size(0), -1)
 | 
			
		||||
        if self.fc is not None:
 | 
			
		||||
            v = self.fc(v)
 | 
			
		||||
        if not self.training:
 | 
			
		||||
            return v
 | 
			
		||||
        y = self.classifier(v)
 | 
			
		||||
        if self.loss == 'softmax':
 | 
			
		||||
            return y
 | 
			
		||||
        elif self.loss == 'triplet':
 | 
			
		||||
            return y, v
 | 
			
		||||
        else:
 | 
			
		||||
            raise KeyError("Unsupported loss: {}".format(self.loss))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def init_pretrained_weights(model, model_url):
 | 
			
		||||
    """Initializes model with pretrained weights.
 | 
			
		||||
    
 | 
			
		||||
    Layers that don't match with pretrained layers in name or size are kept unchanged.
 | 
			
		||||
    """
 | 
			
		||||
    pretrain_dict = model_zoo.load_url(model_url)
 | 
			
		||||
    model_dict = model.state_dict()
 | 
			
		||||
    pretrain_dict = {
 | 
			
		||||
        k: v
 | 
			
		||||
        for k, v in pretrain_dict.items()
 | 
			
		||||
        if k in model_dict and model_dict[k].size() == v.size()
 | 
			
		||||
    }
 | 
			
		||||
    model_dict.update(pretrain_dict)
 | 
			
		||||
    model.load_state_dict(model_dict)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resnet50_ibn_b(num_classes, loss='softmax', pretrained=False, **kwargs):
 | 
			
		||||
    model = ResNet(
 | 
			
		||||
        Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs
 | 
			
		||||
    )
 | 
			
		||||
    if pretrained:
 | 
			
		||||
        init_pretrained_weights(model, model_urls['resnet50'])
 | 
			
		||||
    return model
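
Note: for contrast with the IBN-a file above, a short sketch (not part of the commit) of what distinguishes this IBN-b variant: the stem normalization is InstanceNorm2d, and instance normalization is applied after the residual addition of the last block in layer1 and layer2 (via the IN=True flag in _make_layer) rather than inside every bottleneck's bn1. The embedding size is unchanged; num_classes=751 is only an example value.

# Hypothetical usage sketch for resnet50_ibn_b defined above.
import torch

net = resnet50_ibn_b(num_classes=751, pretrained=False)
net.eval()
with torch.no_grad():
    v = net(torch.randn(2, 3, 256, 128))
print(v.shape)  # torch.Size([2, 2048]) -- same embedding size as IBN-a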

307 feeder/trackers/strongsort/deep/models/resnetmid.py Normal file
@@ -0,0 +1,307 @@
from __future__ import division, absolute_import
import torch
import torch.utils.model_zoo as model_zoo
from torch import nn

__all__ = ['resnet50mid']

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False
    )


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False
        )
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNetMid(nn.Module):
    """Residual network + mid-level features.

    Reference:
        Yu et al. The Devil is in the Middle: Exploiting Mid-level Representations for
        Cross-Domain Instance Matching. arXiv:1711.08106.

    Public keys:
        - ``resnet50mid``: ResNet50 + mid-level feature fusion.
    """

    def __init__(
        self,
        num_classes,
        loss,
        block,
        layers,
        last_stride=2,
        fc_dims=None,
        **kwargs
    ):
        self.inplanes = 64
        super(ResNetMid, self).__init__()
        self.loss = loss
        self.feature_dim = 512 * block.expansion

        # backbone network
        self.conv1 = nn.Conv2d(
            3, 64, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=last_stride
        )

        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        assert fc_dims is not None
        self.fc_fusion = self._construct_fc_layer(
            fc_dims, 512 * block.expansion * 2
        )
        self.feature_dim += 512 * block.expansion
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self._init_params()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer

        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None

        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x4a = self.layer4[0](x)
        x4b = self.layer4[1](x4a)
        x4c = self.layer4[2](x4b)
        return x4a, x4b, x4c

    def forward(self, x):
        x4a, x4b, x4c = self.featuremaps(x)

        v4a = self.global_avgpool(x4a)
        v4b = self.global_avgpool(x4b)
        v4c = self.global_avgpool(x4c)
        v4ab = torch.cat([v4a, v4b], 1)
        v4ab = v4ab.view(v4ab.size(0), -1)
        v4ab = self.fc_fusion(v4ab)
        v4c = v4c.view(v4c.size(0), -1)
        v = torch.cat([v4ab, v4c], 1)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


"""
Residual network configurations:
--
resnet18: block=BasicBlock, layers=[2, 2, 2, 2]
resnet34: block=BasicBlock, layers=[3, 4, 6, 3]
resnet50: block=Bottleneck, layers=[3, 4, 6, 3]
resnet101: block=Bottleneck, layers=[3, 4, 23, 3]
resnet152: block=Bottleneck, layers=[3, 8, 36, 3]
"""


def resnet50mid(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ResNetMid(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 4, 6, 3],
        last_stride=2,
        fc_dims=[1024],
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet50'])
    return model
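
Note: a minimal sketch (not part of the commit) of the mid-level fusion above: v4a and v4b (each 2048-d for Bottleneck) are concatenated and projected to fc_dims[-1] = 1024 by fc_fusion, then concatenated with the 2048-d v4c, so the final embedding is 1024 + 2048 = 3072 dimensions; num_classes=751 is only an example value.

# Hypothetical usage sketch for resnet50mid defined above.
import torch

net = resnet50mid(num_classes=751, pretrained=False)
net.eval()
with torch.no_grad():
    v = net(torch.randn(2, 3, 256, 128))
print(v.shape)  # torch.Size([2, 3072]) -- fused mid-level + top-level features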

688 feeder/trackers/strongsort/deep/models/senet.py Normal file
@@ -0,0 +1,688 @@
from __future__ import division, absolute_import
import math
from collections import OrderedDict
import torch.nn as nn
from torch.utils import model_zoo

__all__ = [
    'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152',
    'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnet50_fc512'
]
"""
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""

pretrained_settings = {
    'senet154': {
        'imagenet': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet50': {
        'imagenet': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet101': {
        'imagenet': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet152': {
        'imagenet': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnext50_32x4d': {
        'imagenet': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnext101_32x4d': {
        'imagenet': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
}


class SEModule(nn.Module):

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(
            channels, channels // reduction, kernel_size=1, padding=0
        )
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(
            channels // reduction, channels, kernel_size=1, padding=0
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x
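
Note: a minimal sketch (not part of the commit) of the squeeze-and-excitation idea implemented above: global average pooling squeezes each channel to a scalar, the two 1x1 convolutions with a reduction bottleneck produce per-channel gates in (0, 1), and the input is rescaled channel-wise; the shapes below are arbitrary example values.

# Hypothetical check of the SEModule defined above.
import torch

se = SEModule(channels=256, reduction=16)  # bottleneck is 256 // 16 = 16 channels
x = torch.randn(2, 256, 14, 14)
y = se(x)
assert y.shape == x.shape                  # SE only reweights channels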


class Bottleneck(nn.Module):
    """
    Base class for bottlenecks that implements `forward()` method.
    """

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out = self.se_module(out) + residual
        out = self.relu(out)

        return out


class SEBottleneck(Bottleneck):
    """
    Bottleneck for SENet154.
    """
    expansion = 4

    def __init__(
        self, inplanes, planes, groups, reduction, stride=1, downsample=None
    ):
        super(SEBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes * 2)
        self.conv2 = nn.Conv2d(
            planes * 2,
            planes * 4,
            kernel_size=3,
            stride=stride,
            padding=1,
            groups=groups,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes * 4)
        self.conv3 = nn.Conv2d(
            planes * 4, planes * 4, kernel_size=1, bias=False
        )
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride


class SEResNetBottleneck(Bottleneck):
    """
    ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
    implementation and uses `stride=stride` in `conv1` and not in `conv2`
    (the latter is used in the torchvision implementation of ResNet).
    """
    expansion = 4

    def __init__(
        self, inplanes, planes, groups, reduction, stride=1, downsample=None
    ):
        super(SEResNetBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=1, bias=False, stride=stride
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=3,
            padding=1,
            groups=groups,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride


class SEResNeXtBottleneck(Bottleneck):
    """ResNeXt bottleneck type C with a Squeeze-and-Excitation module"""
    expansion = 4

    def __init__(
        self,
        inplanes,
        planes,
        groups,
        reduction,
        stride=1,
        downsample=None,
        base_width=4
    ):
        super(SEResNeXtBottleneck, self).__init__()
        width = int(math.floor(planes * (base_width/64.)) * groups)
        self.conv1 = nn.Conv2d(
            inplanes, width, kernel_size=1, bias=False, stride=1
        )
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(
            width,
            width,
            kernel_size=3,
            stride=stride,
            padding=1,
            groups=groups,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride


class SENet(nn.Module):
    """Squeeze-and-excitation network.

    Reference:
        Hu et al. Squeeze-and-Excitation Networks. CVPR 2018.

    Public keys:
        - ``senet154``: SENet154.
        - ``se_resnet50``: ResNet50 + SE.
        - ``se_resnet101``: ResNet101 + SE.
        - ``se_resnet152``: ResNet152 + SE.
        - ``se_resnext50_32x4d``: ResNeXt50 (groups=32, width=4) + SE.
        - ``se_resnext101_32x4d``: ResNeXt101 (groups=32, width=4) + SE.
        - ``se_resnet50_fc512``: (ResNet50 + SE) + FC.
    """

    def __init__(
        self,
        num_classes,
        loss,
        block,
        layers,
        groups,
        reduction,
        dropout_p=0.2,
        inplanes=128,
        input_3x3=True,
        downsample_kernel_size=3,
        downsample_padding=1,
        last_stride=2,
        fc_dims=None,
        **kwargs
    ):
        """
        Parameters
        ----------
        block (nn.Module): Bottleneck class.
            - For SENet154: SEBottleneck
            - For SE-ResNet models: SEResNetBottleneck
            - For SE-ResNeXt models: SEResNeXtBottleneck
        layers (list of ints): Number of residual blocks for 4 layers of the
            network (layer1...layer4).
        groups (int): Number of groups for the 3x3 convolution in each
            bottleneck block.
            - For SENet154: 64
            - For SE-ResNet models: 1
            - For SE-ResNeXt models: 32
        reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
            - For all models: 16
        dropout_p (float or None): Drop probability for the Dropout layer.
            If `None` the Dropout layer is not used.
            - For SENet154: 0.2
            - For SE-ResNet models: None
            - For SE-ResNeXt models: None
        inplanes (int): Number of input channels for layer1.
            - For SENet154: 128
            - For SE-ResNet models: 64
            - For SE-ResNeXt models: 64
        input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
            a single 7x7 convolution in layer0.
            - For SENet154: True
            - For SE-ResNet models: False
            - For SE-ResNeXt models: False
        downsample_kernel_size (int): Kernel size for downsampling convolutions
            in layer2, layer3 and layer4.
            - For SENet154: 3
            - For SE-ResNet models: 1
            - For SE-ResNeXt models: 1
        downsample_padding (int): Padding for downsampling convolutions in
            layer2, layer3 and layer4.
            - For SENet154: 1
            - For SE-ResNet models: 0
            - For SE-ResNeXt models: 0
        num_classes (int): Number of outputs in `classifier` layer.
        """
        super(SENet, self).__init__()
        self.inplanes = inplanes
        self.loss = loss

        if input_3x3:
            layer0_modules = [
                (
                    'conv1',
                    nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)
                ),
                ('bn1', nn.BatchNorm2d(64)),
                ('relu1', nn.ReLU(inplace=True)),
                (
                    'conv2',
                    nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)
                ),
                ('bn2', nn.BatchNorm2d(64)),
                ('relu2', nn.ReLU(inplace=True)),
                (
                    'conv3',
                    nn.Conv2d(
                        64, inplanes, 3, stride=1, padding=1, bias=False
                    )
                ),
                ('bn3', nn.BatchNorm2d(inplanes)),
                ('relu3', nn.ReLU(inplace=True)),
            ]
        else:
            layer0_modules = [
                (
                    'conv1',
                    nn.Conv2d(
                        3,
                        inplanes,
                        kernel_size=7,
                        stride=2,
                        padding=3,
                        bias=False
                    )
                ),
                ('bn1', nn.BatchNorm2d(inplanes)),
                ('relu1', nn.ReLU(inplace=True)),
            ]
        # To preserve compatibility with Caffe weights `ceil_mode=True`
        # is used instead of `padding=1`.
        layer0_modules.append(
            ('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True))
        )
        self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
        self.layer1 = self._make_layer(
            block,
            planes=64,
            blocks=layers[0],
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=1,
            downsample_padding=0
        )
        self.layer2 = self._make_layer(
            block,
            planes=128,
            blocks=layers[1],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )
        self.layer3 = self._make_layer(
            block,
            planes=256,
            blocks=layers[2],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )
        self.layer4 = self._make_layer(
            block,
            planes=512,
            blocks=layers[3],
            stride=last_stride,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )

        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = self._construct_fc_layer(
            fc_dims, 512 * block.expansion, dropout_p
        )
        self.classifier = nn.Linear(self.feature_dim, num_classes)

    def _make_layer(
        self,
        block,
        planes,
        blocks,
        groups,
        reduction,
        stride=1,
        downsample_kernel_size=1,
        downsample_padding=0
    ):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=downsample_kernel_size,
                    stride=stride,
                    padding=downsample_padding,
                    bias=False
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(
                self.inplanes, planes, groups, reduction, stride, downsample
            )
        )
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups, reduction))

        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """
        Construct fully connected layer

        - fc_dims (list or tuple): dimensions of fc layers, if None,
                                   no fc layers are constructed
 | 
			
		||||
        - input_dim (int): input dimension
 | 
			
		||||
        - dropout_p (float): dropout probability, if None, dropout is unused
 | 
			
		||||
        """
 | 
			
		||||
        if fc_dims is None:
 | 
			
		||||
            self.feature_dim = input_dim
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
        assert isinstance(
 | 
			
		||||
            fc_dims, (list, tuple)
 | 
			
		||||
        ), 'fc_dims must be either list or tuple, but got {}'.format(
 | 
			
		||||
            type(fc_dims)
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        layers = []
 | 
			
		||||
        for dim in fc_dims:
 | 
			
		||||
            layers.append(nn.Linear(input_dim, dim))
 | 
			
		||||
            layers.append(nn.BatchNorm1d(dim))
 | 
			
		||||
            layers.append(nn.ReLU(inplace=True))
 | 
			
		||||
            if dropout_p is not None:
 | 
			
		||||
                layers.append(nn.Dropout(p=dropout_p))
 | 
			
		||||
            input_dim = dim
 | 
			
		||||
 | 
			
		||||
        self.feature_dim = fc_dims[-1]
 | 
			
		||||
 | 
			
		||||
        return nn.Sequential(*layers)
 | 
			
		||||
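As a quick illustration of what `_construct_fc_layer` builds (a sketch, not part of the committed file): with `fc_dims=[512]` and `input_dim=2048` the head is Linear -> BatchNorm1d -> ReLU and `feature_dim` drops to 512; with `fc_dims=None` it returns None and `feature_dim` stays at the input dimension.

import torch
from torch import nn

# equivalent to _construct_fc_layer([512], 2048, dropout_p=None)
head = nn.Sequential(
    nn.Linear(2048, 512), nn.BatchNorm1d(512), nn.ReLU(inplace=True)
)
print(head(torch.randn(8, 2048)).shape)  # torch.Size([8, 512])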

    def featuremaps(self, x):
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if self.fc is not None:
            v = self.fc(v)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
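Note the train/eval split in `forward`: in eval mode the model returns the pooled feature vector (the re-ID embedding), while in train mode it returns classifier logits, plus the embedding when the triplet loss is selected. A minimal usage sketch, assuming the factory functions defined below in this file:

import torch

model = se_resnet50(num_classes=751, pretrained=False)
model.eval()
with torch.no_grad():
    feats = model(torch.randn(4, 3, 256, 128))  # (4, 2048) embeddings
model.train()
logits = model(torch.randn(4, 3, 256, 128))     # (4, 751) class scores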


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that do not match the pretrained layers in name or size
    are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)
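The dict comprehension above keeps only pretrained tensors whose key and shape both match the target model, so e.g. a 1000-way ImageNet classifier head is silently skipped when `num_classes` differs. A small sketch of that filtering on two hypothetical state dicts:

import torch

pretrain_dict = {'classifier.weight': torch.zeros(1000, 2048)}
model_dict = {'classifier.weight': torch.zeros(751, 2048)}
kept = {
    k: v
    for k, v in pretrain_dict.items()
    if k in model_dict and model_dict[k].size() == v.size()
}
print(kept)  # {} -- shape mismatch, so the layer keeps its fresh init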


def senet154(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEBottleneck,
        layers=[3, 8, 36, 3],
        groups=64,
        reduction=16,
        dropout_p=0.2,
        last_stride=2,
        fc_dims=None,
        **kwargs
    )
    if pretrained:
        model_url = pretrained_settings['senet154']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model


def se_resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNetBottleneck,
        layers=[3, 4, 6, 3],
        groups=1,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=2,
        fc_dims=None,
        **kwargs
    )
    if pretrained:
        model_url = pretrained_settings['se_resnet50']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model


def se_resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNetBottleneck,
        layers=[3, 4, 6, 3],
        groups=1,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=1,
        fc_dims=[512],
        **kwargs
    )
    if pretrained:
        model_url = pretrained_settings['se_resnet50']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model


def se_resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNetBottleneck,
        layers=[3, 4, 23, 3],
        groups=1,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=2,
        fc_dims=None,
        **kwargs
    )
    if pretrained:
        model_url = pretrained_settings['se_resnet101']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model


def se_resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNetBottleneck,
        layers=[3, 8, 36, 3],
        groups=1,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=2,
        fc_dims=None,
        **kwargs
    )
    if pretrained:
        model_url = pretrained_settings['se_resnet152']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model


def se_resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNeXtBottleneck,
        layers=[3, 4, 6, 3],
        groups=32,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=2,
        fc_dims=None,
        **kwargs
    )
    if pretrained:
        model_url = pretrained_settings['se_resnext50_32x4d']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model


def se_resnext101_32x4d(
    num_classes, loss='softmax', pretrained=True, **kwargs
):
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNeXtBottleneck,
        layers=[3, 4, 23, 3],
        groups=32,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=2,
        fc_dims=None,
        **kwargs
    )
    if pretrained:
        model_url = pretrained_settings['se_resnext101_32x4d']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model

198 feeder/trackers/strongsort/deep/models/shufflenet.py Normal file
@@ -0,0 +1,198 @@
from __future__ import division, absolute_import
import torch
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import functional as F

__all__ = ['shufflenet']

model_urls = {
    # training epoch = 90, top1 = 61.8
    'imagenet':
    'https://mega.nz/#!RDpUlQCY!tr_5xBEkelzDjveIYBBcGcovNCOrgfiJO9kiidz9fZM',
}


class ChannelShuffle(nn.Module):

    def __init__(self, num_groups):
        super(ChannelShuffle, self).__init__()
        self.g = num_groups

    def forward(self, x):
        b, c, h, w = x.size()
        n = c // self.g
        # reshape
        x = x.view(b, self.g, n, h, w)
        # transpose
        x = x.permute(0, 2, 1, 3, 4).contiguous()
        # flatten
        x = x.view(b, c, h, w)
        return x
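The shuffle reorders channels so that each group in the next grouped convolution sees channels coming from every group of the previous one. A tiny worked sketch with 6 channels and 3 groups, assuming the class above and torch imported:

x = torch.arange(6.).view(1, 6, 1, 1)   # channels 0..5
print(ChannelShuffle(3)(x).view(-1))    # tensor([0., 2., 4., 1., 3., 5.])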


class Bottleneck(nn.Module):

    def __init__(
        self,
        in_channels,
        out_channels,
        stride,
        num_groups,
        group_conv1x1=True
    ):
        super(Bottleneck, self).__init__()
        assert stride in [1, 2], 'Warning: stride must be either 1 or 2'
        self.stride = stride
        mid_channels = out_channels // 4
        if stride == 2:
            out_channels -= in_channels
        # group conv is not applied to first conv1x1 at stage 2
        num_groups_conv1x1 = num_groups if group_conv1x1 else 1
        self.conv1 = nn.Conv2d(
            in_channels,
            mid_channels,
            1,
            groups=num_groups_conv1x1,
            bias=False
        )
        self.bn1 = nn.BatchNorm2d(mid_channels)
        self.shuffle1 = ChannelShuffle(num_groups)
        self.conv2 = nn.Conv2d(
            mid_channels,
            mid_channels,
            3,
            stride=stride,
            padding=1,
            groups=mid_channels,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(mid_channels)
        self.conv3 = nn.Conv2d(
            mid_channels, out_channels, 1, groups=num_groups, bias=False
        )
        self.bn3 = nn.BatchNorm2d(out_channels)
        if stride == 2:
            self.shortcut = nn.AvgPool2d(3, stride=2, padding=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = self.bn2(self.conv2(out))
        out = self.bn3(self.conv3(out))
        if self.stride == 2:
            res = self.shortcut(x)
            out = F.relu(torch.cat([res, out], 1))
        else:
            out = F.relu(x + out)
        return out
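For stride 2 the shortcut is an average-pooled copy of the input whose channels are concatenated with the conv branch; that is why `out_channels` is reduced by `in_channels` up front, so the concatenation lands exactly on the requested width. A shape sketch, assuming the Bottleneck above:

blk = Bottleneck(24, 240, stride=2, num_groups=3, group_conv1x1=False)
y = blk(torch.randn(1, 24, 56, 56))
print(y.shape)  # torch.Size([1, 240, 28, 28]): (240-24) conv + 24 pooled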


# configuration of (num_groups: #out_channels) based on Table 1 in the paper
cfg = {
    1: [144, 288, 576],
    2: [200, 400, 800],
    3: [240, 480, 960],
    4: [272, 544, 1088],
    8: [384, 768, 1536],
}


class ShuffleNet(nn.Module):
    """ShuffleNet.

    Reference:
        Zhang et al. ShuffleNet: An Extremely Efficient Convolutional Neural
        Network for Mobile Devices. CVPR 2018.

    Public keys:
        - ``shufflenet``: ShuffleNet (groups=3).
    """

    def __init__(self, num_classes, loss='softmax', num_groups=3, **kwargs):
        super(ShuffleNet, self).__init__()
        self.loss = loss

        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 24, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(),
            nn.MaxPool2d(3, stride=2, padding=1),
        )

        self.stage2 = nn.Sequential(
            Bottleneck(
                24, cfg[num_groups][0], 2, num_groups, group_conv1x1=False
            ),
            Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups),
            Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups),
            Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups),
        )

        self.stage3 = nn.Sequential(
            Bottleneck(cfg[num_groups][0], cfg[num_groups][1], 2, num_groups),
            Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
            Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
            Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
            Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
            Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
            Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
            Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
        )

        self.stage4 = nn.Sequential(
            Bottleneck(cfg[num_groups][1], cfg[num_groups][2], 2, num_groups),
            Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups),
            Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups),
            Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups),
        )

        self.classifier = nn.Linear(cfg[num_groups][2], num_classes)
        self.feat_dim = cfg[num_groups][2]

    def forward(self, x):
        x = self.conv1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), -1)

        if not self.training:
            return x

        y = self.classifier(x)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, x
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that do not match the pretrained layers in name or size
    are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


def shufflenet(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ShuffleNet(num_classes, loss, **kwargs)
    if pretrained:
        # init_pretrained_weights(model, model_urls['imagenet'])
        import warnings
        warnings.warn(
            'The imagenet pretrained weights need to be manually downloaded from {}'
            .format(model_urls['imagenet'])
        )
    return model

262 feeder/trackers/strongsort/deep/models/shufflenetv2.py Normal file
@@ -0,0 +1,262 @@
"""
 | 
			
		||||
Code source: https://github.com/pytorch/vision
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import division, absolute_import
 | 
			
		||||
import torch
 | 
			
		||||
import torch.utils.model_zoo as model_zoo
 | 
			
		||||
from torch import nn
 | 
			
		||||
 | 
			
		||||
__all__ = [
 | 
			
		||||
    'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5',
 | 
			
		||||
    'shufflenet_v2_x2_0'
 | 
			
		||||
]
 | 
			
		||||
 | 
			
		||||
model_urls = {
 | 
			
		||||
    'shufflenetv2_x0.5':
 | 
			
		||||
    'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth',
 | 
			
		||||
    'shufflenetv2_x1.0':
 | 
			
		||||
    'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth',
 | 
			
		||||
    'shufflenetv2_x1.5': None,
 | 
			
		||||
    'shufflenetv2_x2.0': None,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def channel_shuffle(x, groups):
 | 
			
		||||
    batchsize, num_channels, height, width = x.data.size()
 | 
			
		||||
    channels_per_group = num_channels // groups
 | 
			
		||||
 | 
			
		||||
    # reshape
 | 
			
		||||
    x = x.view(batchsize, groups, channels_per_group, height, width)
 | 
			
		||||
 | 
			
		||||
    x = torch.transpose(x, 1, 2).contiguous()
 | 
			
		||||
 | 
			
		||||
    # flatten
 | 
			
		||||
    x = x.view(batchsize, -1, height, width)
 | 
			
		||||
 | 
			
		||||
    return x
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class InvertedResidual(nn.Module):
 | 
			
		||||
 | 
			
		||||
    def __init__(self, inp, oup, stride):
 | 
			
		||||
        super(InvertedResidual, self).__init__()
 | 
			
		||||
 | 
			
		||||
        if not (1 <= stride <= 3):
 | 
			
		||||
            raise ValueError('illegal stride value')
 | 
			
		||||
        self.stride = stride
 | 
			
		||||
 | 
			
		||||
        branch_features = oup // 2
 | 
			
		||||
        assert (self.stride != 1) or (inp == branch_features << 1)
 | 
			
		||||
 | 
			
		||||
        if self.stride > 1:
 | 
			
		||||
            self.branch1 = nn.Sequential(
 | 
			
		||||
                self.depthwise_conv(
 | 
			
		||||
                    inp, inp, kernel_size=3, stride=self.stride, padding=1
 | 
			
		||||
                ),
 | 
			
		||||
                nn.BatchNorm2d(inp),
 | 
			
		||||
                nn.Conv2d(
 | 
			
		||||
                    inp,
 | 
			
		||||
                    branch_features,
 | 
			
		||||
                    kernel_size=1,
 | 
			
		||||
                    stride=1,
 | 
			
		||||
                    padding=0,
 | 
			
		||||
                    bias=False
 | 
			
		||||
                ),
 | 
			
		||||
                nn.BatchNorm2d(branch_features),
 | 
			
		||||
                nn.ReLU(inplace=True),
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        self.branch2 = nn.Sequential(
 | 
			
		||||
            nn.Conv2d(
 | 
			
		||||
                inp if (self.stride > 1) else branch_features,
 | 
			
		||||
                branch_features,
 | 
			
		||||
                kernel_size=1,
 | 
			
		||||
                stride=1,
 | 
			
		||||
                padding=0,
 | 
			
		||||
                bias=False
 | 
			
		||||
            ),
 | 
			
		||||
            nn.BatchNorm2d(branch_features),
 | 
			
		||||
            nn.ReLU(inplace=True),
 | 
			
		||||
            self.depthwise_conv(
 | 
			
		||||
                branch_features,
 | 
			
		||||
                branch_features,
 | 
			
		||||
                kernel_size=3,
 | 
			
		||||
                stride=self.stride,
 | 
			
		||||
                padding=1
 | 
			
		||||
            ),
 | 
			
		||||
            nn.BatchNorm2d(branch_features),
 | 
			
		||||
            nn.Conv2d(
 | 
			
		||||
                branch_features,
 | 
			
		||||
                branch_features,
 | 
			
		||||
                kernel_size=1,
 | 
			
		||||
                stride=1,
 | 
			
		||||
                padding=0,
 | 
			
		||||
                bias=False
 | 
			
		||||
            ),
 | 
			
		||||
            nn.BatchNorm2d(branch_features),
 | 
			
		||||
            nn.ReLU(inplace=True),
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
 | 
			
		||||
        return nn.Conv2d(
 | 
			
		||||
            i, o, kernel_size, stride, padding, bias=bias, groups=i
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        if self.stride == 1:
 | 
			
		||||
            x1, x2 = x.chunk(2, dim=1)
 | 
			
		||||
            out = torch.cat((x1, self.branch2(x2)), dim=1)
 | 
			
		||||
        else:
 | 
			
		||||
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
 | 
			
		||||
 | 
			
		||||
        out = channel_shuffle(out, 2)
 | 
			
		||||
 | 
			
		||||
        return out
 | 
			
		||||
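In the stride-1 unit only half of the channels pass through `branch2`; the other half is carried over unchanged, and the final channel shuffle mixes the two halves. A shape sketch, assuming the class above and torch imported:

unit = InvertedResidual(116, 116, stride=1)     # identity-size unit
print(unit(torch.randn(1, 116, 28, 28)).shape)  # torch.Size([1, 116, 28, 28])
down = InvertedResidual(116, 232, stride=2)     # downsampling unit
print(down(torch.randn(1, 116, 28, 28)).shape)  # torch.Size([1, 232, 14, 14])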


class ShuffleNetV2(nn.Module):
    """ShuffleNetV2.

    Reference:
        Ma et al. ShuffleNet V2: Practical Guidelines for Efficient CNN
        Architecture Design. ECCV 2018.

    Public keys:
        - ``shufflenet_v2_x0_5``: ShuffleNetV2 x0.5.
        - ``shufflenet_v2_x1_0``: ShuffleNetV2 x1.0.
        - ``shufflenet_v2_x1_5``: ShuffleNetV2 x1.5.
        - ``shufflenet_v2_x2_0``: ShuffleNetV2 x2.0.
    """

    def __init__(
        self, num_classes, loss, stages_repeats, stages_out_channels, **kwargs
    ):
        super(ShuffleNetV2, self).__init__()
        self.loss = loss

        if len(stages_repeats) != 3:
            raise ValueError(
                'expected stages_repeats as list of 3 positive ints'
            )
        if len(stages_out_channels) != 5:
            raise ValueError(
                'expected stages_out_channels as list of 5 positive ints'
            )
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(
            stage_names, stages_repeats, self._stage_out_channels[1:]
        ):
            seq = [InvertedResidual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(
                    InvertedResidual(output_channels, output_channels, 1)
                )
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))

        self.classifier = nn.Linear(output_channels, num_classes)

    def featuremaps(self, x):
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        return x

    def forward(self, x):
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that do not match the pretrained layers in name or size
    are kept unchanged.
    """
    if model_url is None:
        import warnings
        warnings.warn(
            'ImageNet pretrained weights are unavailable for this model'
        )
        return
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


def shufflenet_v2_x0_5(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ShuffleNetV2(
        num_classes, loss, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['shufflenetv2_x0.5'])
    return model


def shufflenet_v2_x1_0(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ShuffleNetV2(
        num_classes, loss, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['shufflenetv2_x1.0'])
    return model


def shufflenet_v2_x1_5(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ShuffleNetV2(
        num_classes, loss, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['shufflenetv2_x1.5'])
    return model


def shufflenet_v2_x2_0(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ShuffleNetV2(
        num_classes, loss, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['shufflenetv2_x2.0'])
    return model

236 feeder/trackers/strongsort/deep/models/squeezenet.py Normal file
@@ -0,0 +1,236 @@
"""
 | 
			
		||||
Code source: https://github.com/pytorch/vision
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import division, absolute_import
 | 
			
		||||
import torch
 | 
			
		||||
import torch.nn as nn
 | 
			
		||||
import torch.utils.model_zoo as model_zoo
 | 
			
		||||
 | 
			
		||||
__all__ = ['squeezenet1_0', 'squeezenet1_1', 'squeezenet1_0_fc512']
 | 
			
		||||
 | 
			
		||||
model_urls = {
 | 
			
		||||
    'squeezenet1_0':
 | 
			
		||||
    'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
 | 
			
		||||
    'squeezenet1_1':
 | 
			
		||||
    'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Fire(nn.Module):
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes
 | 
			
		||||
    ):
 | 
			
		||||
        super(Fire, self).__init__()
 | 
			
		||||
        self.inplanes = inplanes
 | 
			
		||||
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
 | 
			
		||||
        self.squeeze_activation = nn.ReLU(inplace=True)
 | 
			
		||||
        self.expand1x1 = nn.Conv2d(
 | 
			
		||||
            squeeze_planes, expand1x1_planes, kernel_size=1
 | 
			
		||||
        )
 | 
			
		||||
        self.expand1x1_activation = nn.ReLU(inplace=True)
 | 
			
		||||
        self.expand3x3 = nn.Conv2d(
 | 
			
		||||
            squeeze_planes, expand3x3_planes, kernel_size=3, padding=1
 | 
			
		||||
        )
 | 
			
		||||
        self.expand3x3_activation = nn.ReLU(inplace=True)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        x = self.squeeze_activation(self.squeeze(x))
 | 
			
		||||
        return torch.cat(
 | 
			
		||||
            [
 | 
			
		||||
                self.expand1x1_activation(self.expand1x1(x)),
 | 
			
		||||
                self.expand3x3_activation(self.expand3x3(x))
 | 
			
		||||
            ], 1
 | 
			
		||||
        )
 | 
			
		||||
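A Fire module squeezes the input with 1x1 convolutions, then expands it along parallel 1x1 and 3x3 paths whose outputs are concatenated channel-wise, so the output width is `expand1x1_planes + expand3x3_planes`. A shape sketch, assuming the class above:

fire = Fire(96, 16, 64, 64)
print(fire(torch.randn(1, 96, 54, 54)).shape)  # torch.Size([1, 128, 54, 54])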


class SqueezeNet(nn.Module):
    """SqueezeNet.

    Reference:
        Iandola et al. SqueezeNet: AlexNet-level accuracy with 50x fewer
        parameters and <0.5 MB model size. arXiv:1602.07360.

    Public keys:
        - ``squeezenet1_0``: SqueezeNet (version=1.0).
        - ``squeezenet1_1``: SqueezeNet (version=1.1).
        - ``squeezenet1_0_fc512``: SqueezeNet (version=1.0) + FC.
    """

    def __init__(
        self,
        num_classes,
        loss,
        version=1.0,
        fc_dims=None,
        dropout_p=None,
        **kwargs
    ):
        super(SqueezeNet, self).__init__()
        self.loss = loss
        self.feature_dim = 512

        if version not in [1.0, 1.1]:
            raise ValueError(
                'Unsupported SqueezeNet version {version}: '
                '1.0 or 1.1 expected'.format(version=version)
            )

        if version == 1.0:
            self.features = nn.Sequential(
                nn.Conv2d(3, 96, kernel_size=7, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(96, 16, 64, 64),
                Fire(128, 16, 64, 64),
                Fire(128, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 32, 128, 128),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512, 64, 256, 256),
            )
        else:
            self.features = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(64, 16, 64, 64),
                Fire(128, 16, 64, 64),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(128, 32, 128, 128),
                Fire(256, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                Fire(512, 64, 256, 256),
            )

        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = self._construct_fc_layer(fc_dims, 512, dropout_p)
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self._init_params()

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer.

        Args:
            fc_dims (list or tuple): dimensions of fc layers; if None,
                no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability; if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None

        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        f = self.features(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if self.fc is not None:
            v = self.fc(v)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that do not match the pretrained layers in name or size
    are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url, map_location=None)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)


def squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SqueezeNet(
        num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['squeezenet1_0'])
    return model


def squeezenet1_0_fc512(
    num_classes, loss='softmax', pretrained=True, **kwargs
):
    model = SqueezeNet(
        num_classes,
        loss,
        version=1.0,
        fc_dims=[512],
        dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['squeezenet1_0'])
    return model


def squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SqueezeNet(
        num_classes, loss, version=1.1, fc_dims=None, dropout_p=None, **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['squeezenet1_1'])
    return model

344 feeder/trackers/strongsort/deep/models/xception.py Normal file
@@ -0,0 +1,344 @@
from __future__ import division, absolute_import
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo

__all__ = ['xception']

pretrained_settings = {
    'xception': {
        'imagenet': {
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/xception-43020ad28.pth',
            'input_space': 'RGB',
            'input_size': [3, 299, 299],
            'input_range': [0, 1],
            'mean': [0.5, 0.5, 0.5],
            'std': [0.5, 0.5, 0.5],
            'num_classes': 1000,
            # The resize parameter of the validation transform should be 333,
            # and make sure to center crop at 299x299.
            'scale': 0.8975
        }
    }
}


class SeparableConv2d(nn.Module):

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=1,
        stride=1,
        padding=0,
        dilation=1,
        bias=False
    ):
        super(SeparableConv2d, self).__init__()

        self.conv1 = nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups=in_channels,
            bias=bias
        )
        self.pointwise = nn.Conv2d(
            in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.pointwise(x)
        return x
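A depthwise separable convolution factors a full convolution into a per-channel spatial filter (`conv1`, with `groups=in_channels`) followed by a 1x1 pointwise mix, cutting the parameter count by roughly the kernel area. A parameter-count sketch, assuming the class above:

sep = SeparableConv2d(128, 256, kernel_size=3, padding=1)
full = nn.Conv2d(128, 256, 3, padding=1, bias=False)
print(sum(p.numel() for p in sep.parameters()))   # 33920  (128*9 + 128*256)
print(sum(p.numel() for p in full.parameters()))  # 294912 (128*256*9)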


class Block(nn.Module):

    def __init__(
        self,
        in_filters,
        out_filters,
        reps,
        strides=1,
        start_with_relu=True,
        grow_first=True
    ):
        super(Block, self).__init__()

        if out_filters != in_filters or strides != 1:
            self.skip = nn.Conv2d(
                in_filters, out_filters, 1, stride=strides, bias=False
            )
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip = None

        self.relu = nn.ReLU(inplace=True)
        rep = []

        filters = in_filters
        if grow_first:
            rep.append(self.relu)
            rep.append(
                SeparableConv2d(
                    in_filters,
                    out_filters,
                    3,
                    stride=1,
                    padding=1,
                    bias=False
                )
            )
            rep.append(nn.BatchNorm2d(out_filters))
            filters = out_filters

        for i in range(reps - 1):
            rep.append(self.relu)
            rep.append(
                SeparableConv2d(
                    filters, filters, 3, stride=1, padding=1, bias=False
                )
            )
            rep.append(nn.BatchNorm2d(filters))

        if not grow_first:
            rep.append(self.relu)
            rep.append(
                SeparableConv2d(
                    in_filters,
                    out_filters,
                    3,
                    stride=1,
                    padding=1,
                    bias=False
                )
            )
            rep.append(nn.BatchNorm2d(out_filters))

        if not start_with_relu:
            rep = rep[1:]
        else:
            rep[0] = nn.ReLU(inplace=False)

        if strides != 1:
            rep.append(nn.MaxPool2d(3, strides, 1))
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        x = self.rep(inp)

        if self.skip is not None:
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp

        x += skip
        return x
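Each Block is a residual stack of separable convolutions; when the channel count or stride changes, a 1x1 strided convolution plus BatchNorm aligns the skip path before the addition. A shape sketch for the entry-flow block1 configuration, assuming the class above (torch is not imported at the top of this file, so the sketch imports it):

import torch

blk = Block(64, 128, reps=2, strides=2, start_with_relu=False, grow_first=True)
print(blk(torch.randn(1, 64, 112, 112)).shape)  # torch.Size([1, 128, 56, 56])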
 | 
			
		||||
 | 
			
		||||
class Xception(nn.Module):
 | 
			
		||||
    """Xception.
 | 
			
		||||
    
 | 
			
		||||
    Reference:
 | 
			
		||||
        Chollet. Xception: Deep Learning with Depthwise
 | 
			
		||||
        Separable Convolutions. CVPR 2017.
 | 
			
		||||
 | 
			
		||||
    Public keys:
 | 
			
		||||
        - ``xception``: Xception.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self, num_classes, loss, fc_dims=None, dropout_p=None, **kwargs
 | 
			
		||||
    ):
 | 
			
		||||
        super(Xception, self).__init__()
 | 
			
		||||
        self.loss = loss
 | 
			
		||||
 | 
			
		||||
        self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False)
 | 
			
		||||
        self.bn1 = nn.BatchNorm2d(32)
 | 
			
		||||
 | 
			
		||||
        self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
 | 
			
		||||
        self.bn2 = nn.BatchNorm2d(64)
 | 
			
		||||
 | 
			
		||||
        self.block1 = Block(
 | 
			
		||||
            64, 128, 2, 2, start_with_relu=False, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
        self.block2 = Block(
 | 
			
		||||
            128, 256, 2, 2, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
        self.block3 = Block(
 | 
			
		||||
            256, 728, 2, 2, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        self.block4 = Block(
 | 
			
		||||
            728, 728, 3, 1, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
        self.block5 = Block(
 | 
			
		||||
            728, 728, 3, 1, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
        self.block6 = Block(
 | 
			
		||||
            728, 728, 3, 1, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
        self.block7 = Block(
 | 
			
		||||
            728, 728, 3, 1, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        self.block8 = Block(
 | 
			
		||||
            728, 728, 3, 1, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
        self.block9 = Block(
 | 
			
		||||
            728, 728, 3, 1, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
        self.block10 = Block(
 | 
			
		||||
            728, 728, 3, 1, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
        self.block11 = Block(
 | 
			
		||||
            728, 728, 3, 1, start_with_relu=True, grow_first=True
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        self.block12 = Block(
 | 
			
		||||
            728, 1024, 2, 2, start_with_relu=True, grow_first=False
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
 | 
			
		||||
        self.bn3 = nn.BatchNorm2d(1536)
 | 
			
		||||
 | 
			
		||||
        self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1)
 | 
			
		||||
        self.bn4 = nn.BatchNorm2d(2048)
 | 
			
		||||
 | 
			
		||||
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
 | 
			
		||||
        self.feature_dim = 2048
 | 
			
		||||
        self.fc = self._construct_fc_layer(fc_dims, 2048, dropout_p)
 | 
			
		||||
        self.classifier = nn.Linear(self.feature_dim, num_classes)
 | 
			
		||||
 | 
			
		||||
        self._init_params()
 | 
			
		||||
 | 
			
		||||
    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer.

        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None

        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)
    def _init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def featuremaps(self, input):
        x = self.conv1(input)
        x = self.bn1(x)
        x = F.relu(x, inplace=True)

        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x, inplace=True)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = F.relu(x, inplace=True)

        x = self.conv4(x)
        x = self.bn4(x)
        x = F.relu(x, inplace=True)
        return x
    def forward(self, x):
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if self.fc is not None:
            v = self.fc(v)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
    """Initialize models with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    model_dict = model.state_dict()
    pretrain_dict = {
        k: v
        for k, v in pretrain_dict.items()
        if k in model_dict and model_dict[k].size() == v.size()
    }
    model_dict.update(pretrain_dict)
    model.load_state_dict(model_dict)
def xception(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = Xception(num_classes, loss, fc_dims=None, dropout_p=None, **kwargs)
    if pretrained:
        model_url = pretrained_settings['xception']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model
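
A minimal usage sketch of the factory above (illustrative only: 751 is the Market-1501 identity count, and 299x299 is Xception's native input resolution):

import torch

# Build the backbone without downloading ImageNet weights.
model = xception(num_classes=751, loss='softmax', pretrained=False)
model.eval()                               # eval mode makes forward() return embeddings
with torch.no_grad():
    dummy = torch.randn(2, 3, 299, 299)    # batch of 2 RGB images
    feats = model(dummy)                   # -> (2, 2048) feature vectors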
							
								
								
									
215 feeder/trackers/strongsort/deep/reid_model_factory.py Normal file
@@ -0,0 +1,215 @@
import sys
import time
import warnings
from collections import OrderedDict

import torch

__model_types = [
    'resnet50', 'mlfn', 'hacnn', 'mobilenetv2_x1_0', 'mobilenetv2_x1_4',
    'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25',
    'osnet_ibn_x1_0', 'osnet_ain_x1_0']

__trained_urls = {

    # market1501 models ########################################################
    'resnet50_market1501.pt':
    'https://drive.google.com/uc?id=1dUUZ4rHDWohmsQXCRe2C_HbYkzz94iBV',
    'resnet50_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=17ymnLglnc64NRvGOitY3BqMRS9UWd1wg',
    'resnet50_msmt17.pt':
    'https://drive.google.com/uc?id=1ep7RypVDOthCRIAqDnn4_N-UhkkFHJsj',

    'resnet50_fc512_market1501.pt':
    'https://drive.google.com/uc?id=1kv8l5laX_YCdIGVCetjlNdzKIA3NvsSt',
    'resnet50_fc512_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=13QN8Mp3XH81GK4BPGXobKHKyTGH50Rtx',
    'resnet50_fc512_msmt17.pt':
    'https://drive.google.com/uc?id=1fDJLcz4O5wxNSUvImIIjoaIF9u1Rwaud',

    'mlfn_market1501.pt':
    'https://drive.google.com/uc?id=1wXcvhA_b1kpDfrt9s2Pma-MHxtj9pmvS',
    'mlfn_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=1rExgrTNb0VCIcOnXfMsbwSUW1h2L1Bum',
    'mlfn_msmt17.pt':
    'https://drive.google.com/uc?id=18JzsZlJb3Wm7irCbZbZ07TN4IFKvR6p-',

    'hacnn_market1501.pt':
    'https://drive.google.com/uc?id=1LRKIQduThwGxMDQMiVkTScBwR7WidmYF',
    'hacnn_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=1zNm6tP4ozFUCUQ7Sv1Z98EAJWXJEhtYH',
    'hacnn_msmt17.pt':
    'https://drive.google.com/uc?id=1MsKRtPM5WJ3_Tk2xC0aGOO7pM3VaFDNZ',

    'mobilenetv2_x1_0_market1501.pt':
    'https://drive.google.com/uc?id=18DgHC2ZJkjekVoqBWszD8_Xiikz-fewp',
    'mobilenetv2_x1_0_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=1q1WU2FETRJ3BXcpVtfJUuqq4z3psetds',
    'mobilenetv2_x1_0_msmt17.pt':
    'https://drive.google.com/uc?id=1j50Hv14NOUAg7ZeB3frzfX-WYLi7SrhZ',

    'mobilenetv2_x1_4_market1501.pt':
    'https://drive.google.com/uc?id=1t6JCqphJG-fwwPVkRLmGGyEBhGOf2GO5',
    'mobilenetv2_x1_4_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=12uD5FeVqLg9-AFDju2L7SQxjmPb4zpBN',
    'mobilenetv2_x1_4_msmt17.pt':
    'https://drive.google.com/uc?id=1ZY5P2Zgm-3RbDpbXM0kIBMPvspeNIbXz',

    'osnet_x1_0_market1501.pt':
    'https://drive.google.com/uc?id=1vduhq5DpN2q1g4fYEZfPI17MJeh9qyrA',
    'osnet_x1_0_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=1QZO_4sNf4hdOKKKzKc-TZU9WW1v6zQbq',
    'osnet_x1_0_msmt17.pt':
    'https://drive.google.com/uc?id=112EMUfBPYeYg70w-syK6V6Mx8-Qb9Q1M',

    'osnet_x0_75_market1501.pt':
    'https://drive.google.com/uc?id=1ozRaDSQw_EQ8_93OUmjDbvLXw9TnfPer',
    'osnet_x0_75_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=1IE3KRaTPp4OUa6PGTFL_d5_KQSJbP0Or',
    'osnet_x0_75_msmt17.pt':
    'https://drive.google.com/uc?id=1QEGO6WnJ-BmUzVPd3q9NoaO_GsPNlmWc',

    'osnet_x0_5_market1501.pt':
    'https://drive.google.com/uc?id=1PLB9rgqrUM7blWrg4QlprCuPT7ILYGKT',
    'osnet_x0_5_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=1KoUVqmiST175hnkALg9XuTi1oYpqcyTu',
    'osnet_x0_5_msmt17.pt':
    'https://drive.google.com/uc?id=1UT3AxIaDvS2PdxzZmbkLmjtiqq7AIKCv',

    'osnet_x0_25_market1501.pt':
    'https://drive.google.com/uc?id=1z1UghYvOTtjx7kEoRfmqSMu-z62J6MAj',
    'osnet_x0_25_dukemtmcreid.pt':
    'https://drive.google.com/uc?id=1eumrtiXT4NOspjyEV4j8cHmlOaaCGk5l',
    'osnet_x0_25_msmt17.pt':
    'https://drive.google.com/uc?id=1sSwXSUlj4_tHZequ_iZ8w_Jh0VaRQMqF',

    ####### msmt17 models ######################################################
    # NOTE: the entries below repeat keys from the blocks above; in a dict
    # literal the later URL silently wins.
    'resnet50_msmt17.pt':
    'https://drive.google.com/uc?id=1yiBteqgIZoOeywE8AhGmEQl7FTVwrQmf',
    'osnet_x1_0_msmt17.pt':
    'https://drive.google.com/uc?id=1IosIFlLiulGIjwW3H8uMRmx3MzPwf86x',
    'osnet_x0_75_msmt17.pt':
    'https://drive.google.com/uc?id=1fhjSS_7SUGCioIf2SWXaRGPqIY9j7-uw',

    'osnet_x0_5_msmt17.pt':
    'https://drive.google.com/uc?id=1DHgmb6XV4fwG3n-CnCM0zdL9nMsZ9_RF',
    'osnet_x0_25_msmt17.pt':
    'https://drive.google.com/uc?id=1Kkx2zW89jq_NETu4u42CFZTMVD5Hwm6e',
    'osnet_ibn_x1_0_msmt17.pt':
    'https://drive.google.com/uc?id=1q3Sj2ii34NlfxA4LvmHdWO_75NDRmECJ',
    'osnet_ain_x1_0_msmt17.pt':
    'https://drive.google.com/uc?id=1SigwBE6mPdqiJMqhuIY4aqC7--5CsMal',
}

def show_downloadeable_models():
    print('\nAvailable .pt ReID models for automatic download')
    print(list(__trained_urls.keys()))

def get_model_url(model):
    if model.name in __trained_urls:
        return __trained_urls[model.name]
    else:
        return None

def is_model_in_model_types(model):
    return model.name in __model_types

def get_model_name(model):
    for x in __model_types:
        if x in model.name:
            return x
    return None

def download_url(url, dst):
    """Downloads file from a url to a destination.

    Args:
        url (str): url to download file.
        dst (str): destination path.
    """
    from six.moves import urllib
    print('* url="{}"'.format(url))
    print('* destination="{}"'.format(dst))

    def _reporthook(count, block_size, total_size):
        global start_time
        if count == 0:
            start_time = time.time()
            return
        duration = time.time() - start_time
        progress_size = int(count * block_size)
        speed = int(progress_size / (1024*duration))
        percent = int(count * block_size * 100 / total_size)
        sys.stdout.write(
            '\r...%d%%, %d MB, %d KB/s, %d seconds passed' %
            (percent, progress_size / (1024*1024), speed, duration)
        )
        sys.stdout.flush()

    urllib.request.urlretrieve(url, dst, _reporthook)
    sys.stdout.write('\n')

def load_pretrained_weights(model, weight_path):
    r"""Loads pretrained weights to model.

    Features::
        - Incompatible layers (unmatched in name or size) will be ignored.
        - Can automatically deal with keys containing "module.".

    Args:
        model (nn.Module): network model.
        weight_path (str): path to pretrained weights.

    Examples::
        >>> from torchreid.utils import load_pretrained_weights
        >>> weight_path = 'log/my_model/model-best.pth.tar'
        >>> load_pretrained_weights(model, weight_path)
    """
    checkpoint = torch.load(weight_path)
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint

    model_dict = model.state_dict()
    new_state_dict = OrderedDict()
    matched_layers, discarded_layers = [], []

    for k, v in state_dict.items():
        if k.startswith('module.'):
            k = k[7:]  # discard the 'module.' prefix added by DataParallel

        if k in model_dict and model_dict[k].size() == v.size():
            new_state_dict[k] = v
            matched_layers.append(k)
        else:
            discarded_layers.append(k)

    model_dict.update(new_state_dict)
    model.load_state_dict(model_dict)

    if len(matched_layers) == 0:
        warnings.warn(
            'The pretrained weights "{}" cannot be loaded, '
            'please check the key names manually '
            '(** ignored and continue **)'.format(weight_path)
        )
    else:
        print(
            'Successfully loaded pretrained weights from "{}"'.
            format(weight_path)
        )
        if len(discarded_layers) > 0:
            print(
                '** The following layers are discarded '
                'due to unmatched keys or layer size: {}'.
                format(discarded_layers)
            )
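
A short sketch of how these helpers fit together (the weight filename is illustrative; Google Drive links generally need gdown rather than plain urlretrieve):

from pathlib import Path
import gdown

w = Path('osnet_x0_25_msmt17.pt')       # weights are named <model>_<dataset>.pt
print(get_model_name(w))                # -> 'osnet_x0_25' (matched against __model_types)
url = get_model_url(w)                  # Drive URL registered in __trained_urls, or None
if url is not None and not w.exists():
    gdown.download(url, str(w), quiet=False)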
							
								
								
									
237 feeder/trackers/strongsort/reid_multibackend.py Normal file
@@ -0,0 +1,237 @@
import sys
from collections import OrderedDict, namedtuple
from itertools import islice
from os.path import exists as file_exists
from pathlib import Path

import cv2
import gdown
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as T

from ultralytics.yolo.utils.checks import check_requirements, check_version
from ultralytics.yolo.utils import LOGGER
from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name,
                                                         download_url, load_pretrained_weights)
from trackers.strongsort.deep.models import build_model


def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
    # Check file(s) for acceptable suffix
    if file and suffix:
        if isinstance(suffix, str):
            suffix = [suffix]
        for f in file if isinstance(file, (list, tuple)) else [file]:
            s = Path(f).suffix.lower()  # file suffix
            if len(s):
                assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"


class ReIDDetectMultiBackend(nn.Module):
    # ReID models MultiBackend class for python inference on various backends
    def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False):
        super().__init__()

        # Accept either str or Path; the code below relies on Path attributes
        # such as w.suffix, w.name and w.is_file().
        w = Path(weights[0] if isinstance(weights, list) else weights)
        self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w)  # get backend
        self.fp16 = fp16
        self.fp16 &= self.pt or self.jit or self.engine  # FP16

        # Build transform functions
        self.device = device
        self.image_size = (256, 128)
        self.pixel_mean = [0.485, 0.456, 0.406]
        self.pixel_std = [0.229, 0.224, 0.225]
        self.transforms = []
        self.transforms += [T.Resize(self.image_size)]
        self.transforms += [T.ToTensor()]
        self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)]
        self.preprocess = T.Compose(self.transforms)
        self.to_pil = T.ToPILImage()

        model_name = get_model_name(w)

        if w.suffix == '.pt':
            model_url = get_model_url(w)
            if not file_exists(w) and model_url is not None:
                gdown.download(model_url, str(w), quiet=False)
            elif file_exists(w):
                pass
            else:
                print(f'No URL associated to the chosen StrongSORT weights ({w}). Choose between:')
                show_downloadeable_models()
                exit()

        # Build model
        self.model = build_model(
            model_name,
            num_classes=1,
            pretrained=not (w and w.is_file()),
            use_gpu=device
        )

        if self.pt:  # PyTorch
            # populate model arch with weights
            if w and w.is_file() and w.suffix == '.pt':
                load_pretrained_weights(self.model, w)

            self.model.to(device).eval()
            self.model.half() if self.fp16 else self.model.float()
        elif self.jit:
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            self.model = torch.jit.load(w)
            self.model.half() if self.fp16 else self.model.float()
        elif self.onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            cuda = torch.cuda.is_available() and device.type != 'cpu'
            # check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
            self.session = onnxruntime.InferenceSession(str(w), providers=providers)
        elif self.engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
            if device.type == 'cpu':
                device = torch.device('cuda:0')
            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                self.model_ = runtime.deserialize_cuda_engine(f.read())
            self.context = self.model_.create_execution_context()
            self.bindings = OrderedDict()
            self.fp16 = False  # default updated below
            dynamic = False
            for index in range(self.model_.num_bindings):
                name = self.model_.get_binding_name(index)
                dtype = trt.nptype(self.model_.get_binding_dtype(index))
                if self.model_.binding_is_input(index):
                    if -1 in tuple(self.model_.get_binding_shape(index)):  # dynamic
                        dynamic = True
                        self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2]))
                    if dtype == np.float16:
                        self.fp16 = True
                shape = tuple(self.context.get_binding_shape(index))
                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
                self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
            self.dynamic = dynamic  # forward() consults this when reshaping bindings
            self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items())
            batch_size = self.bindings['images'].shape[0]  # if dynamic, this is instead max batch size
        elif self.xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements(('openvino',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
            from openvino.runtime import Core, Layout, get_batch
            ie = Core()
            if not Path(w).is_file():  # if not *.xml
                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
            if network.get_parameters()[0].get_layout().empty:
                network.get_parameters()[0].set_layout(Layout("NCWH"))
            batch_dim = get_batch(network)
            if batch_dim.is_static:
                batch_size = batch_dim.get_length()
            self.executable_network = ie.compile_model(network, device_name="CPU")  # device_name="MYRIAD" for Intel NCS2
            self.output_layer = next(iter(self.executable_network.outputs))

        elif self.tflite:
            LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
                from tflite_runtime.interpreter import Interpreter, load_delegate
            except ImportError:
                import tensorflow as tf
                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
            # Use the Interpreter resolved above so the tflite_runtime path also
            # works when full TensorFlow is not installed.
            self.interpreter = Interpreter(model_path=str(w))
            self.interpreter.allocate_tensors()
            # Get input and output tensors.
            self.input_details = self.interpreter.get_input_details()
            self.output_details = self.interpreter.get_output_details()

            # Test model on random input data.
            input_data = np.array(np.random.random_sample((1, 256, 128, 3)), dtype=np.float32)
            self.interpreter.set_tensor(self.input_details[0]['index'], input_data)

            self.interpreter.invoke()

            # The function `get_tensor()` returns a copy of the tensor data.
            output_data = self.interpreter.get_tensor(self.output_details[0]['index'])
        else:
            print('This model framework is not supported yet!')
            exit()

    @staticmethod
    def model_type(p='path/to/model.pt'):
        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
        from trackers.reid_export import export_formats
        sf = list(export_formats().Suffix)  # export suffixes
        check_suffix(p, sf)  # checks
        types = [s in Path(p).name for s in sf]
        return types

    def _preprocess(self, im_batch):
        images = []
        for element in im_batch:
            image = self.to_pil(element)
            image = self.preprocess(image)
            images.append(image)

        images = torch.stack(images, dim=0)
        images = images.to(self.device)

        return images

    def forward(self, im_batch):
        # preprocess batch
        im_batch = self._preprocess(im_batch)

        # batch to half
        if self.fp16 and im_batch.dtype != torch.float16:
            im_batch = im_batch.half()

        # batch processing
        features = []
        if self.pt:
            features = self.model(im_batch)
        elif self.jit:  # TorchScript
            features = self.model(im_batch)
        elif self.onnx:  # ONNX Runtime
            im_batch = im_batch.cpu().numpy()  # torch to numpy
            features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0]
        elif self.engine:  # TensorRT
            if self.dynamic and im_batch.shape != self.bindings['images'].shape:
                i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output'))
                self.context.set_binding_shape(i_in, im_batch.shape)  # reshape if dynamic
                self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape)
                self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out)))
            s = self.bindings['images'].shape
            assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
            self.binding_addrs['images'] = int(im_batch.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            features = self.bindings['output'].data
        elif self.xml:  # OpenVINO
            im_batch = im_batch.cpu().numpy()  # FP32
            features = self.executable_network([im_batch])[self.output_layer]
        else:
            print('Framework not supported at the moment, we are working on it...')
            exit()

        if isinstance(features, (list, tuple)):
            return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features]
        else:
            return self.from_numpy(features)

    def from_numpy(self, x):
        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x

    def warmup(self, imgsz=[(256, 128, 3)]):
        # Warmup model by running inference once
        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.tflite
        if any(warmup_types) and self.device.type != 'cpu':
            im = [np.empty(*imgsz).astype(np.uint8)]  # input
            for _ in range(2 if self.jit else 1):
                self.forward(im)  # warmup
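
A usage sketch for the backend wrapper above (paths and crop sizes are illustrative; crops are HWC uint8 arrays cut out of a frame by the detector):

import numpy as np
import torch

extractor = ReIDDetectMultiBackend(weights='osnet_x0_25_msmt17.pt',
                                   device=torch.device('cpu'), fp16=False)
extractor.warmup()                                      # no-op on CPU, one dry run on GPU
crops = [np.zeros((128, 64, 3), dtype=np.uint8) for _ in range(4)]
features = extractor(crops)                             # -> (4, embedding_dim) tensor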
							
								
								
									
										
BIN feeder/trackers/strongsort/results/output_04.gif Normal file
Binary file not shown. | After: Size: 8.2 MiB

BIN feeder/trackers/strongsort/results/output_th025.gif Normal file
Binary file not shown. | After: Size: 9.6 MiB

BIN feeder/trackers/strongsort/results/track_all_1280_025conf.gif Normal file
Binary file not shown. | After: Size: 8.3 MiB

Binary file not shown. | After: Size: 7.9 MiB

Binary file not shown. | After: Size: 7.5 MiB
							
								
								
									
0 feeder/trackers/strongsort/sort/__init__.py Normal file
								
								
									
58 feeder/trackers/strongsort/sort/detection.py Normal file
@@ -0,0 +1,58 @@
# vim: expandtab:ts=4:sw=4
import numpy as np


class Detection(object):
    """
    This class represents a bounding box detection in a single image.

    Parameters
    ----------
    tlwh : array_like
        Bounding box in format `(x, y, w, h)`.
    confidence : float
        Detector confidence score.
    feature : array_like
        A feature vector that describes the object contained in this image.

    Attributes
    ----------
    tlwh : ndarray
        Bounding box in format `(top left x, top left y, width, height)`.
    confidence : ndarray
        Detector confidence score.
    feature : ndarray | NoneType
        A feature vector that describes the object contained in this image.

    """

    def __init__(self, tlwh, confidence, feature):
        self.tlwh = np.asarray(tlwh, dtype=np.float32)
        self.confidence = float(confidence)
        self.feature = np.asarray(feature.cpu(), dtype=np.float32)

    def to_tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    def to_xyah(self):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = self.tlwh.copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret


def to_xyah_ext(bbox):
    """Convert bounding box to format `(center x, center y, aspect ratio,
    height)`, where the aspect ratio is `width / height`.
    """
    ret = bbox.copy()
    ret[:2] += ret[2:] / 2
    ret[2] /= ret[3]
    return ret
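
A worked example of the two conversions (values chosen for illustration; the feature must be a torch tensor, since the constructor calls `.cpu()` on it):

import torch

det = Detection(tlwh=[10, 20, 50, 100], confidence=0.9, feature=torch.zeros(512))
print(det.to_tlbr())   # [ 10.  20.  60. 120.] -> (min x, min y, max x, max y)
print(det.to_xyah())   # [ 35.  70.   0.5 100.] -> (center x, center y, w/h, h)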
							
								
								
									
82 feeder/trackers/strongsort/sort/iou_matching.py Normal file
@@ -0,0 +1,82 @@
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import linear_assignment


def iou(bbox, candidates):
    """Compute intersection over union.

    Parameters
    ----------
    bbox : ndarray
        A bounding box in format `(top left x, top left y, width, height)`.
    candidates : ndarray
        A matrix of candidate bounding boxes (one per row) in the same format
        as `bbox`.

    Returns
    -------
    ndarray
        The intersection over union in [0, 1] between the `bbox` and each
        candidate. A higher score means a larger fraction of the `bbox` is
        occluded by the candidate.

    """
    bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
    candidates_tl = candidates[:, :2]
    candidates_br = candidates[:, :2] + candidates[:, 2:]

    tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
               np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
    br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
               np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
    wh = np.maximum(0., br - tl)

    area_intersection = wh.prod(axis=1)
    area_bbox = bbox[2:].prod()
    area_candidates = candidates[:, 2:].prod(axis=1)
    return area_intersection / (area_bbox + area_candidates - area_intersection)
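
A quick numeric check of `iou` (illustrative boxes in tlwh format):

import numpy as np

box = np.array([0., 0., 10., 10.])              # area 100
cands = np.array([[0., 0., 10., 10.],           # identical box
                  [5., 5., 10., 10.]])          # half-overlapping box
print(iou(box, cands))                          # [1.0, 25/175] ~= [1.0, 0.143]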

def iou_cost(tracks, detections, track_indices=None,
             detection_indices=None):
    """An intersection over union distance metric.

    Parameters
    ----------
    tracks : List[deep_sort.track.Track]
        A list of tracks.
    detections : List[deep_sort.detection.Detection]
        A list of detections.
    track_indices : Optional[List[int]]
        A list of indices to tracks that should be matched. Defaults to
        all `tracks`.
    detection_indices : Optional[List[int]]
        A list of indices to detections that should be matched. Defaults
        to all `detections`.

    Returns
    -------
    ndarray
        Returns a cost matrix of shape
        len(track_indices), len(detection_indices) where entry (i, j) is
        `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.

    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
    for row, track_idx in enumerate(track_indices):
        if tracks[track_idx].time_since_update > 1:
            cost_matrix[row, :] = linear_assignment.INFTY_COST
            continue

        bbox = tracks[track_idx].to_tlwh()
        candidates = np.asarray(
            [detections[i].tlwh for i in detection_indices])
        cost_matrix[row, :] = 1. - iou(bbox, candidates)
    return cost_matrix
							
								
								
									
214 feeder/trackers/strongsort/sort/kalman_filter.py Normal file
@@ -0,0 +1,214 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg

"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
    1: 3.8415,
    2: 5.9915,
    3: 7.8147,
    4: 9.4877,
    5: 11.070,
    6: 12.592,
    7: 14.067,
    8: 15.507,
    9: 16.919}


class KalmanFilter(object):
    """
    A simple Kalman filter for tracking bounding boxes in image space.

    The 8-dimensional state space

        x, y, a, h, vx, vy, va, vh

    contains the bounding box center position (x, y), aspect ratio a, height h,
    and their respective velocities.

    Object motion follows a constant velocity model. The bounding box location
    (x, y, a, h) is taken as direct observation of the state space (linear
    observation model).
    """

    def __init__(self):
        ndim, dt = 4, 1.

        # Create Kalman filter model matrices.
        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
        for i in range(ndim):
            self._motion_mat[i, ndim + i] = dt

        self._update_mat = np.eye(ndim, 2 * ndim)

        # Motion and observation uncertainty are chosen relative to the current
        # state estimate. These weights control the amount of uncertainty in
        # the model. This is a bit hacky.
        self._std_weight_position = 1. / 20
        self._std_weight_velocity = 1. / 160

    def initiate(self, measurement):
        """Create track from unassociated measurement.

        Parameters
        ----------
        measurement : ndarray
            Bounding box coordinates (x, y, a, h) with center position (x, y),
            aspect ratio a, and height h.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector (8 dimensional) and covariance matrix (8x8
            dimensional) of the new track. Unobserved velocities are initialized
            to 0 mean.
        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        std = [
            2 * self._std_weight_position * measurement[0],   # the center point x
            2 * self._std_weight_position * measurement[1],   # the center point y
            1 * measurement[2],                               # the ratio of width/height
            2 * self._std_weight_position * measurement[3],   # the height
            10 * self._std_weight_velocity * measurement[0],
            10 * self._std_weight_velocity * measurement[1],
            0.1 * measurement[2],
            10 * self._std_weight_velocity * measurement[3]]
        covariance = np.diag(np.square(std))
        return mean, covariance

    def predict(self, mean, covariance):
        """Run Kalman filter prediction step.

        Parameters
        ----------
        mean : ndarray
            The 8 dimensional mean vector of the object state at the previous
            time step.
        covariance : ndarray
            The 8x8 dimensional covariance matrix of the object state at the
            previous time step.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.
        """
        std_pos = [
            self._std_weight_position * mean[0],
            self._std_weight_position * mean[1],
            1 * mean[2],
            self._std_weight_position * mean[3]]
        std_vel = [
            self._std_weight_velocity * mean[0],
            self._std_weight_velocity * mean[1],
            0.1 * mean[2],
            self._std_weight_velocity * mean[3]]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        mean = np.dot(self._motion_mat, mean)
        covariance = np.linalg.multi_dot((
            self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance

    def project(self, mean, covariance, confidence=.0):
        """Project state distribution to measurement space.

        Parameters
        ----------
        mean : ndarray
            The state's mean vector (8 dimensional array).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        confidence : float
            (dyh) Detection confidence score; scales the measurement noise.

        Returns
        -------
        (ndarray, ndarray)
            Returns the projected mean and covariance matrix of the given state
            estimate.
        """
        std = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-1,
            self._std_weight_position * mean[3]]

        std = [(1 - confidence) * x for x in std]

        innovation_cov = np.diag(np.square(std))

        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((
            self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

    def update(self, mean, covariance, measurement, confidence=.0):
        """Run Kalman filter correction step.

        Parameters
        ----------
        mean : ndarray
            The predicted state's mean vector (8 dimensional).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        measurement : ndarray
            The 4 dimensional measurement vector (x, y, a, h), where (x, y)
            is the center position, a the aspect ratio, and h the height of the
            bounding box.
        confidence : float
            (dyh) Detection confidence score; passed through to `project`.

        Returns
        -------
        (ndarray, ndarray)
            Returns the measurement-corrected state distribution.
        """
        projected_mean, projected_cov = self.project(mean, covariance, confidence)

        chol_factor, lower = scipy.linalg.cho_factor(
            projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve(
            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
            check_finite=False).T
        innovation = measurement - projected_mean

        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((
            kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance

    def gating_distance(self, mean, covariance, measurements,
                        only_position=False):
        """Compute gating distance between state distribution and measurements.

        A suitable distance threshold can be obtained from `chi2inv95`. If
        `only_position` is False, the chi-square distribution has 4 degrees of
        freedom, otherwise 2.

        Parameters
        ----------
        mean : ndarray
            Mean vector over the state distribution (8 dimensional).
        covariance : ndarray
            Covariance of the state distribution (8x8 dimensional).
        measurements : ndarray
            An Nx4 dimensional matrix of N measurements, each in
            format (x, y, a, h) where (x, y) is the bounding box center
            position, a the aspect ratio, and h the height.
        only_position : Optional[bool]
            If True, distance computation is done with respect to the bounding
            box center position only.

        Returns
        -------
        ndarray
            Returns an array of length N, where the i-th element contains the
            squared Mahalanobis distance between (mean, covariance) and
            `measurements[i]`.
        """
        mean, covariance = self.project(mean, covariance)

        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]

        cholesky_factor = np.linalg.cholesky(covariance)
        d = measurements - mean
        z = scipy.linalg.solve_triangular(
            cholesky_factor, d.T, lower=True, check_finite=False,
            overwrite_b=True)
        squared_maha = np.sum(z * z, axis=0)
        return squared_maha
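
A minimal round-trip through the filter (illustrative measurement in (x, y, a, h) format; the (1 - confidence) scaling in `project` implements the confidence-weighted, NSA-style measurement noise used by StrongSORT):

import numpy as np

kf = KalmanFilter()
z = np.array([320., 240., 0.5, 100.])                # center, aspect ratio, height
mean, cov = kf.initiate(z)                           # 8-d state, 8x8 covariance
mean, cov = kf.predict(mean, cov)                    # constant-velocity prediction
mean, cov = kf.update(mean, cov, z, confidence=0.8)  # higher confidence -> lower noise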
							
								
								
									
174 feeder/trackers/strongsort/sort/linear_assignment.py Normal file
@@ -0,0 +1,174 @@
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from scipy.optimize import linear_sum_assignment
from . import kalman_filter


INFTY_COST = 1e+5


def min_cost_matching(
        distance_metric, max_distance, tracks, detections, track_indices=None,
        detection_indices=None):
    """Solve linear assignment problem.

    Parameters
    ----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
        The distance metric is given a list of tracks and detections as well as
        a list of N track indices and M detection indices. The metric should
        return the NxM dimensional cost matrix, where element (i, j) is the
        association cost between the i-th track in the given track indices and
        the j-th detection in the given detection_indices.
    max_distance : float
        Gating threshold. Associations with cost larger than this value are
        disregarded.
    tracks : List[track.Track]
        A list of predicted tracks at the current time step.
    detections : List[detection.Detection]
        A list of detections at the current time step.
    track_indices : List[int]
        List of track indices that maps rows in `cost_matrix` to tracks in
        `tracks` (see description above).
    detection_indices : List[int]
        List of detection indices that maps columns in `cost_matrix` to
        detections in `detections` (see description above).

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Returns a tuple with the following three entries:
        * A list of matched track and detection indices.
        * A list of unmatched track indices.
        * A list of unmatched detection indices.
    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    if len(detection_indices) == 0 or len(track_indices) == 0:
        return [], track_indices, detection_indices  # Nothing to match.

    cost_matrix = distance_metric(
        tracks, detections, track_indices, detection_indices)
    cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
    row_indices, col_indices = linear_sum_assignment(cost_matrix)

    matches, unmatched_tracks, unmatched_detections = [], [], []
    for col, detection_idx in enumerate(detection_indices):
        if col not in col_indices:
            unmatched_detections.append(detection_idx)
    for row, track_idx in enumerate(track_indices):
        if row not in row_indices:
            unmatched_tracks.append(track_idx)
    for row, col in zip(row_indices, col_indices):
        track_idx = track_indices[row]
        detection_idx = detection_indices[col]
        if cost_matrix[row, col] > max_distance:
            unmatched_tracks.append(track_idx)
            unmatched_detections.append(detection_idx)
        else:
            matches.append((track_idx, detection_idx))
    return matches, unmatched_tracks, unmatched_detections
def matching_cascade(
 | 
			
		||||
        distance_metric, max_distance, cascade_depth, tracks, detections,
 | 
			
		||||
        track_indices=None, detection_indices=None):
 | 
			
		||||
    """Run matching cascade.
 | 
			
		||||
    Parameters
 | 
			
		||||
    ----------
 | 
			
		||||
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
 | 
			
		||||
        The distance metric is given a list of tracks and detections as well as
 | 
			
		||||
        a list of N track indices and M detection indices. The metric should
 | 
			
		||||
        return the NxM dimensional cost matrix, where element (i, j) is the
 | 
			
		||||
        association cost between the i-th track in the given track indices and
 | 
			
		||||
        the j-th detection in the given detection indices.
 | 
			
		||||
    max_distance : float
 | 
			
		||||
        Gating threshold. Associations with cost larger than this value are
 | 
			
		||||
        disregarded.
 | 
			
		||||
    cascade_depth: int
 | 
			
		||||
        The cascade depth, should be se to the maximum track age.
 | 
			
		||||
    tracks : List[track.Track]
 | 
			
		||||
        A list of predicted tracks at the current time step.
 | 
			
		||||
    detections : List[detection.Detection]
 | 
			
		||||
        A list of detections at the current time step.
 | 
			
		||||
    track_indices : Optional[List[int]]
 | 
			
		||||
        List of track indices that maps rows in `cost_matrix` to tracks in
 | 
			
		||||
        `tracks` (see description above). Defaults to all tracks.
 | 
			
		||||
    detection_indices : Optional[List[int]]
 | 
			
		||||
        List of detection indices that maps columns in `cost_matrix` to
 | 
			
		||||
        detections in `detections` (see description above). Defaults to all
 | 
			
		||||
        detections.
 | 
			
		||||
    Returns
 | 
			
		||||
    -------
 | 
			
		||||
    (List[(int, int)], List[int], List[int])
 | 
			
		||||
        Returns a tuple with the following three entries:
 | 
			
		||||
        * A list of matched track and detection indices.
 | 
			
		||||
        * A list of unmatched track indices.
 | 
			
		||||
        * A list of unmatched detection indices.
 | 
			
		||||
    """
 | 
			
		||||
    if track_indices is None:
 | 
			
		||||
        track_indices = list(range(len(tracks)))
 | 
			
		||||
    if detection_indices is None:
 | 
			
		||||
        detection_indices = list(range(len(detections)))
 | 
			
		||||
 | 
			
		||||
    unmatched_detections = detection_indices
 | 
			
		||||
    matches = []
 | 
			
		||||
    track_indices_l = [
 | 
			
		||||
        k for k in track_indices
 | 
			
		||||
        # if tracks[k].time_since_update == 1 + level
 | 
			
		||||
    ]
 | 
			
		||||
    matches_l, _, unmatched_detections = \
 | 
			
		||||
        min_cost_matching(
 | 
			
		||||
            distance_metric, max_distance, tracks, detections,
 | 
			
		||||
            track_indices_l, unmatched_detections)
 | 
			
		||||
    matches += matches_l
 | 
			
		||||
    unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
 | 
			
		||||
    return matches, unmatched_tracks, unmatched_detections
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def gate_cost_matrix(
 | 
			
		||||
        cost_matrix, tracks, detections, track_indices, detection_indices, mc_lambda,
 | 
			
		||||
        gated_cost=INFTY_COST, only_position=False):
 | 
			
		||||
    """Invalidate infeasible entries in cost matrix based on the state
 | 
			
		||||
    distributions obtained by Kalman filtering.
 | 
			
		||||
    Parameters
 | 
			
		||||
    ----------
 | 
			
		||||
    kf : The Kalman filter.
 | 
			
		||||
    cost_matrix : ndarray
 | 
			
		||||
        The NxM dimensional cost matrix, where N is the number of track indices
 | 
			
		||||
        and M is the number of detection indices, such that entry (i, j) is the
 | 
			
		||||
        association cost between `tracks[track_indices[i]]` and
 | 
			
		||||
        `detections[detection_indices[j]]`.
 | 
			
		||||
    tracks : List[track.Track]
 | 
			
		||||
        A list of predicted tracks at the current time step.
 | 
			
		||||
    detections : List[detection.Detection]
 | 
			
		||||
        A list of detections at the current time step.
 | 
			
		||||
    track_indices : List[int]
 | 
			
		||||
        List of track indices that maps rows in `cost_matrix` to tracks in
 | 
			
		||||
        `tracks` (see description above).
 | 
			
		||||
    detection_indices : List[int]
 | 
			
		||||
        List of detection indices that maps columns in `cost_matrix` to
 | 
			
		||||
        detections in `detections` (see description above).
 | 
			
		||||
    gated_cost : Optional[float]
 | 
			
		||||
        Entries in the cost matrix corresponding to infeasible associations are
 | 
			
		||||
        set this value. Defaults to a very large value.
 | 
			
		||||
    only_position : Optional[bool]
 | 
			
		||||
        If True, only the x, y position of the state distribution is considered
 | 
			
		||||
        during gating. Defaults to False.
 | 
			
		||||
    Returns
 | 
			
		||||
    -------
 | 
			
		||||
    ndarray
 | 
			
		||||
        Returns the modified cost matrix.
 | 
			
		||||
    """
 | 
			
		||||
    gating_dim = 2 if only_position else 4
 | 
			
		||||
    gating_threshold = kalman_filter.chi2inv95[gating_dim]
 | 
			
		||||
    measurements = np.asarray(
 | 
			
		||||
        [detections[i].to_xyah() for i in detection_indices])
 | 
			
		||||
    for row, track_idx in enumerate(track_indices):
 | 
			
		||||
        track = tracks[track_idx]
 | 
			
		||||
        gating_distance = track.kf.gating_distance(track.mean, track.covariance, measurements, only_position)
 | 
			
		||||
        cost_matrix[row, gating_distance > gating_threshold] = gated_cost
 | 
			
		||||
        cost_matrix[row] = mc_lambda * cost_matrix[row] + (1 - mc_lambda) *  gating_distance
 | 
			
		||||
    return cost_matrix
 | 
			
		||||
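A minimal sketch of `min_cost_matching` on a toy cost function. `toy_metric` and the `None` placeholders standing in for track/detection objects are hypothetical, since only the index-based cost-matrix interface matters here, and the import path is assumed to mirror the one used by `track.py`:

import numpy as np
from trackers.strongsort.sort.linear_assignment import min_cost_matching

def toy_metric(tracks, detections, track_indices, detection_indices):
    # Hypothetical precomputed 2x3 cost matrix; the real metrics are the
    # gated appearance distance and the IoU cost.
    costs = np.array([[0.10, 0.90, 0.80],
                      [0.70, 0.20, 0.95]])
    return costs[np.ix_(track_indices, detection_indices)]

matches, um_tracks, um_dets = min_cost_matching(
    toy_metric, max_distance=0.5,
    tracks=[None, None], detections=[None, None, None])
# matches -> [(0, 0), (1, 1)]; detection 2 stays unmatched because its best
# cost (0.8) exceeds the 0.5 gate.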
							
								
								
									
162 feeder/trackers/strongsort/sort/nn_matching.py Normal file
@@ -0,0 +1,162 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
import sys
import torch


def _pdist(a, b):
    """Compute pair-wise squared distance between points in `a` and `b`.

    Parameters
    ----------
    a : array_like
        An NxM matrix of N samples of dimensionality M.
    b : array_like
        An LxM matrix of L samples of dimensionality M.

    Returns
    -------
    ndarray
        Returns a matrix of size len(a), len(b) such that element (i, j)
        contains the squared distance between `a[i]` and `b[j]`.

    """
    a, b = np.asarray(a), np.asarray(b)
    if len(a) == 0 or len(b) == 0:
        return np.zeros((len(a), len(b)))
    a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
    r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
    r2 = np.clip(r2, 0., float(np.inf))
    return r2


def _cosine_distance(a, b, data_is_normalized=False):
    """Compute pair-wise cosine distance between points in `a` and `b`.

    Parameters
    ----------
    a : array_like
        An NxM matrix of N samples of dimensionality M.
    b : array_like
        An LxM matrix of L samples of dimensionality M.
    data_is_normalized : Optional[bool]
        If True, assumes rows in a and b are unit length vectors.
        Otherwise, a and b are explicitly normalized to length 1.

    Returns
    -------
    ndarray
        Returns a matrix of size len(a), len(b) such that element (i, j)
        contains the cosine distance between `a[i]` and `b[j]`.

    """
    if not data_is_normalized:
        a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
        b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
    return 1. - np.dot(a, b.T)


def _nn_euclidean_distance(x, y):
    """ Helper function for nearest neighbor distance metric (Euclidean).

    Parameters
    ----------
    x : ndarray
        A matrix of N row-vectors (sample points).
    y : ndarray
        A matrix of M row-vectors (query points).

    Returns
    -------
    ndarray
        A vector of length M that contains for each entry in `y` the
        smallest Euclidean distance to a sample in `x`.

    """
    # x_ = torch.from_numpy(np.asarray(x) / np.linalg.norm(x, axis=1, keepdims=True))
    # y_ = torch.from_numpy(np.asarray(y) / np.linalg.norm(y, axis=1, keepdims=True))
    distances = _pdist(x, y)
    # _pdist returns a NumPy array, so reduce with NumPy rather than torch.min.
    return np.maximum(0.0, distances.min(axis=0))


def _nn_cosine_distance(x, y):
    """ Helper function for nearest neighbor distance metric (cosine).

    Parameters
    ----------
    x : ndarray
        A matrix of N row-vectors (sample points).
    y : ndarray
        A matrix of M row-vectors (query points).

    Returns
    -------
    ndarray
        A vector of length M that contains for each entry in `y` the
        smallest cosine distance to a sample in `x`.

    """
    x_ = torch.from_numpy(np.asarray(x))
    y_ = torch.from_numpy(np.asarray(y))
    distances = _cosine_distance(x_, y_)
    return distances.min(axis=0)


class NearestNeighborDistanceMetric(object):
    """
    A nearest neighbor distance metric that, for each target, returns
    the closest distance to any sample that has been observed so far.

    Parameters
    ----------
    metric : str
        Either "euclidean" or "cosine".
    matching_threshold: float
        The matching threshold. Samples with larger distance are considered an
        invalid match.
    budget : Optional[int]
        If not None, fix samples per class to at most this number. Removes
        the oldest samples when the budget is reached.

    Attributes
    ----------
    samples : Dict[int -> List[ndarray]]
        A dictionary that maps from target identities to the list of samples
        that have been observed so far.

    """

    def __init__(self, metric, matching_threshold, budget=None):
        if metric == "euclidean":
            self._metric = _nn_euclidean_distance
        elif metric == "cosine":
            self._metric = _nn_cosine_distance
        else:
            raise ValueError(
                "Invalid metric; must be either 'euclidean' or 'cosine'")
        self.matching_threshold = matching_threshold
        self.budget = budget
        self.samples = {}

    def partial_fit(self, features, targets, active_targets):
        """Update the distance metric with new data.

        Parameters
        ----------
        features : ndarray
            An NxM matrix of N features of dimensionality M.
        targets : ndarray
            An integer array of associated target identities.
        active_targets : List[int]
            A list of targets that are currently present in the scene.

        """
        for feature, target in zip(features, targets):
            self.samples.setdefault(target, []).append(feature)
            if self.budget is not None:
                self.samples[target] = self.samples[target][-self.budget:]
        self.samples = {k: self.samples[k] for k in active_targets}

    def distance(self, features, targets):
        """Compute distance between features and targets.

        Parameters
        ----------
        features : ndarray
            An NxM matrix of N features of dimensionality M.
        targets : List[int]
            A list of targets to match the given `features` against.

        Returns
        -------
        ndarray
            Returns a cost matrix of shape len(targets), len(features), where
            element (i, j) contains the closest squared distance between
            `targets[i]` and `features[j]`.

        """
        cost_matrix = np.zeros((len(targets), len(features)))
        for i, target in enumerate(targets):
            cost_matrix[i, :] = self._metric(self.samples[target], features)
        return cost_matrix
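A minimal sketch of the gallery workflow with random embeddings; shapes and ids are illustrative, and the import path mirrors the one used by strong_sort.py below:

import numpy as np
from sort.nn_matching import NearestNeighborDistanceMetric

metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2, budget=100)

feats = np.random.rand(4, 128).astype(np.float32)    # 4 embeddings, dim 128
targets = np.array([1, 1, 2, 2])                     # track ids they belong to
metric.partial_fit(feats, targets, active_targets=[1, 2])

queries = np.random.rand(3, 128).astype(np.float32)  # 3 new detection embeddings
cost = metric.distance(queries, targets=[1, 2])      # cost matrix, shape (2, 3)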
							
								
								
									
73 feeder/trackers/strongsort/sort/preprocessing.py Normal file
@@ -0,0 +1,73 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
import cv2


def non_max_suppression(boxes, max_bbox_overlap, scores=None):
    """Suppress overlapping detections.

    Original code from [1]_ has been adapted to include confidence score.

    .. [1] http://www.pyimagesearch.com/2015/02/16/
           faster-non-maximum-suppression-python/

    Examples
    --------

        >>> boxes = [d.roi for d in detections]
        >>> scores = [d.confidence for d in detections]
        >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
        >>> detections = [detections[i] for i in indices]

    Parameters
    ----------
    boxes : ndarray
        Array of ROIs (x, y, width, height).
    max_bbox_overlap : float
        ROIs that overlap more than this value are suppressed.
    scores : Optional[array_like]
        Detector confidence score.

    Returns
    -------
    List[int]
        Returns indices of detections that have survived non-maxima suppression.

    """
    if len(boxes) == 0:
        return []

    # np.float was removed in NumPy 1.24; use the builtin float dtype instead.
    boxes = boxes.astype(float)
    pick = []

    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2] + boxes[:, 0]
    y2 = boxes[:, 3] + boxes[:, 1]

    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    if scores is not None:
        idxs = np.argsort(scores)
    else:
        idxs = np.argsort(y2)

    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)

        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])

        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)

        overlap = (w * h) / area[idxs[:last]]

        idxs = np.delete(
            idxs, np.concatenate(
                ([last], np.where(overlap > max_bbox_overlap)[0])))

    return pick
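A quick sketch of `non_max_suppression` on three hand-made boxes; the first two overlap heavily, so the lower-scoring one is suppressed (import path assumed to mirror the other sort.* imports):

import numpy as np
from sort.preprocessing import non_max_suppression

boxes = np.array([[10, 10, 50, 50],      # (x, y, w, h)
                  [12, 12, 50, 50],      # ~92% overlap with the box above
                  [200, 200, 40, 40]])
scores = np.array([0.9, 0.6, 0.8])

keep = non_max_suppression(boxes, max_bbox_overlap=0.7, scores=scores)
# keep -> [0, 2]: the highest-scoring box of the overlapping pair survives.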
							
								
								
									
317 feeder/trackers/strongsort/sort/track.py Normal file
@@ -0,0 +1,317 @@
# vim: expandtab:ts=4:sw=4
import cv2
import numpy as np
from trackers.strongsort.sort.kalman_filter import KalmanFilter
from collections import deque


class TrackState:
    """
    Enumeration type for the single target track state. Newly created tracks are
    classified as `tentative` until enough evidence has been collected. Then,
    the track state is changed to `confirmed`. Tracks that are no longer alive
    are classified as `deleted` to mark them for removal from the set of active
    tracks.

    """

    Tentative = 1
    Confirmed = 2
    Deleted = 3


class Track:
    """
    A single target track with state space `(x, y, a, h)` and associated
    velocities, where `(x, y)` is the center of the bounding box, `a` is the
    aspect ratio and `h` is the height.

    Parameters
    ----------
    mean : ndarray
        Mean vector of the initial state distribution.
    covariance : ndarray
        Covariance matrix of the initial state distribution.
    track_id : int
        A unique track identifier.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.
    max_age : int
        The maximum number of consecutive misses before the track state is
        set to `Deleted`.
    feature : Optional[ndarray]
        Feature vector of the detection this track originates from. If not None,
        this feature is added to the `features` cache.

    Attributes
    ----------
    mean : ndarray
        Mean vector of the initial state distribution.
    covariance : ndarray
        Covariance matrix of the initial state distribution.
    track_id : int
        A unique track identifier.
    hits : int
        Total number of measurement updates.
    age : int
        Total number of frames since first occurrence.
    time_since_update : int
        Total number of frames since last measurement update.
    state : TrackState
        The current track state.
    features : List[ndarray]
        A cache of features. On each measurement update, the associated feature
        vector is added to this list.

    """

    def __init__(self, detection, track_id, class_id, conf, n_init, max_age, ema_alpha,
                 feature=None):
        self.track_id = track_id
        self.class_id = int(class_id)
        self.hits = 1
        self.age = 1
        self.time_since_update = 0
        self.max_num_updates_wo_assignment = 7
        self.updates_wo_assignment = 0
        self.ema_alpha = ema_alpha

        self.state = TrackState.Tentative
        self.features = []
        if feature is not None:
            feature /= np.linalg.norm(feature)
            self.features.append(feature)

        self.conf = conf
        self._n_init = n_init
        self._max_age = max_age

        self.kf = KalmanFilter()
        self.mean, self.covariance = self.kf.initiate(detection)

        # Initializing trajectory queue
        self.q = deque(maxlen=25)

    def to_tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
        width, height)`.

        Returns
        -------
        ndarray
            The bounding box.

        """
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]
        ret[:2] -= ret[2:] / 2
        return ret

    def to_tlbr(self):
        """Get kf estimated current position in bounding box format `(min x, min y,
        max x, max y)`.

        Returns
        -------
        ndarray
            The predicted kf bounding box.

        """
        ret = self.to_tlwh()
        ret[2:] = ret[:2] + ret[2:]
        return ret

    def ECC(self, src, dst, warp_mode=cv2.MOTION_EUCLIDEAN, eps=1e-5,
            max_iter=100, scale=0.1, align=False):
        """Compute the warp matrix from src to dst.

        Parameters
        ----------
        src : ndarray
            An NxM matrix of the source image (BGR or grayscale); must be the
            same format as dst.
        dst : ndarray
            An NxM matrix of the target image (BGR or grayscale).
        warp_mode : OpenCV motion type flag
            translation: cv2.MOTION_TRANSLATION
            rotation and shift: cv2.MOTION_EUCLIDEAN
            affine (shift, rotation, shear): cv2.MOTION_AFFINE
            homography (3d): cv2.MOTION_HOMOGRAPHY
        eps : float
            Threshold on the increment in the correlation coefficient between
            two iterations.
        max_iter : int
            Maximum number of iterations.
        scale : float or [int, int]
            scale_ratio: float
            scale_size: [W, H]
        align : bool
            Whether to also warp the source image with the estimated transform.

        Returns
        -------
        warp matrix : ndarray
            Returns the warp matrix from src to dst.
            If the motion model is homography the warp matrix is 3x3,
            otherwise it is 2x3.
        src_aligned : ndarray
            Aligned grayscale source image (only when `align` is True).

        """

        # BGR2GRAY
        if src.ndim == 3:
            # Convert images to grayscale
            src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
            dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)

        # make the images smaller to speed up
        if scale is not None:
            if isinstance(scale, float) or isinstance(scale, int):
                if scale != 1:
                    src_r = cv2.resize(src, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
                    dst_r = cv2.resize(dst, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
                    scale = [scale, scale]
                else:
                    src_r, dst_r = src, dst
                    scale = None
            else:
                if scale[0] != src.shape[1] and scale[1] != src.shape[0]:
                    src_r = cv2.resize(src, (scale[0], scale[1]), interpolation=cv2.INTER_LINEAR)
                    dst_r = cv2.resize(dst, (scale[0], scale[1]), interpolation=cv2.INTER_LINEAR)
                    scale = [scale[0] / src.shape[1], scale[1] / src.shape[0]]
                else:
                    src_r, dst_r = src, dst
                    scale = None
        else:
            src_r, dst_r = src, dst

        # Define 2x3 or 3x3 matrices and initialize the matrix to identity
        if warp_mode == cv2.MOTION_HOMOGRAPHY:
            warp_matrix = np.eye(3, 3, dtype=np.float32)
        else:
            warp_matrix = np.eye(2, 3, dtype=np.float32)

        # Define termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, eps)

        # Run the ECC algorithm. The results are stored in warp_matrix.
        try:
            (cc, warp_matrix) = cv2.findTransformECC(src_r, dst_r, warp_matrix, warp_mode, criteria, None, 1)
        except cv2.error as e:
            print(f'ECC transform failed: {e}')
            return None, None

        if scale is not None:
            warp_matrix[0, 2] = warp_matrix[0, 2] / scale[0]
            warp_matrix[1, 2] = warp_matrix[1, 2] / scale[1]

        if align:
            sz = src.shape
            if warp_mode == cv2.MOTION_HOMOGRAPHY:
                # Use warpPerspective for Homography
                src_aligned = cv2.warpPerspective(src, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR)
            else:
                # Use warpAffine for Translation, Euclidean and Affine
                src_aligned = cv2.warpAffine(src, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR)
            return warp_matrix, src_aligned
        else:
            return warp_matrix, None

    def get_matrix(self, matrix):
        eye = np.eye(3)
        dist = np.linalg.norm(eye - matrix)
        if dist < 100:
            return matrix
        else:
            return eye

    def camera_update(self, previous_frame, next_frame):
        warp_matrix, src_aligned = self.ECC(previous_frame, next_frame)
        if warp_matrix is None and src_aligned is None:
            return
        [a, b] = warp_matrix
        warp_matrix = np.array([a, b, [0, 0, 1]])
        warp_matrix = warp_matrix.tolist()
        matrix = self.get_matrix(warp_matrix)

        x1, y1, x2, y2 = self.to_tlbr()
        x1_, y1_, _ = matrix @ np.array([x1, y1, 1]).T
        x2_, y2_, _ = matrix @ np.array([x2, y2, 1]).T
        w, h = x2_ - x1_, y2_ - y1_
        cx, cy = x1_ + w / 2, y1_ + h / 2
        self.mean[:4] = [cx, cy, w / h, h]

    def increment_age(self):
        self.age += 1
        self.time_since_update += 1

    def predict(self, kf):
        """Propagate the state distribution to the current time step using a
        Kalman filter prediction step.

        Parameters
        ----------
        kf : kalman_filter.KalmanFilter
            The Kalman filter.

        """
        self.mean, self.covariance = self.kf.predict(self.mean, self.covariance)
        self.age += 1
        self.time_since_update += 1

    def update_kf(self, bbox, confidence=0.5):
        self.updates_wo_assignment = self.updates_wo_assignment + 1
        self.mean, self.covariance = self.kf.update(self.mean, self.covariance, bbox, confidence)
        tlbr = self.to_tlbr()
        x_c = int((tlbr[0] + tlbr[2]) / 2)
        y_c = int((tlbr[1] + tlbr[3]) / 2)
        self.q.append(('predupdate', (x_c, y_c)))

    def update(self, detection, class_id, conf):
        """Perform Kalman filter measurement update step and update the feature
        cache.

        Parameters
        ----------
        detection : Detection
            The associated detection.

        """
        self.conf = conf
        self.class_id = class_id.int()
        self.mean, self.covariance = self.kf.update(self.mean, self.covariance, detection.to_xyah(), detection.confidence)

        feature = detection.feature / np.linalg.norm(detection.feature)

        # Exponential moving average of the appearance embedding (EMA bank).
        smooth_feat = self.ema_alpha * self.features[-1] + (1 - self.ema_alpha) * feature
        smooth_feat /= np.linalg.norm(smooth_feat)
        self.features = [smooth_feat]

        self.hits += 1
        self.time_since_update = 0
        if self.state == TrackState.Tentative and self.hits >= self._n_init:
            self.state = TrackState.Confirmed

        tlbr = self.to_tlbr()
        x_c = int((tlbr[0] + tlbr[2]) / 2)
        y_c = int((tlbr[1] + tlbr[3]) / 2)
        self.q.append(('observationupdate', (x_c, y_c)))

    def mark_missed(self):
        """Mark this track as missed (no association at the current time step).
        """
        if self.state == TrackState.Tentative:
            self.state = TrackState.Deleted
        elif self.time_since_update > self._max_age:
            self.state = TrackState.Deleted

    def is_tentative(self):
        """Returns True if this track is tentative (unconfirmed).
        """
        return self.state == TrackState.Tentative

    def is_confirmed(self):
        """Returns True if this track is confirmed."""
        return self.state == TrackState.Confirmed

    def is_deleted(self):
        """Returns True if this track is dead and should be deleted."""
        return self.state == TrackState.Deleted
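For the camera-motion compensation above, a small sketch of `Track.ECC` on two consecutive frames. The frame paths are placeholders, and since `ECC` does not read any track state, any `Track` instance can be used to call it:

import cv2
import numpy as np
from trackers.strongsort.sort.track import Track

prev = cv2.imread('frame_t0.jpg')   # placeholder paths
curr = cv2.imread('frame_t1.jpg')

track = Track(np.array([320., 240., 0.5, 100.]),  # initial (x, y, a, h)
              track_id=1, class_id=0, conf=0.9,
              n_init=3, max_age=30, ema_alpha=0.9)
warp, _ = track.ECC(prev, curr, warp_mode=cv2.MOTION_EUCLIDEAN, scale=0.1)
if warp is not None:
    print(warp)  # 2x3 matrix mapping prev-frame pixels to curr-frame pixels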
							
								
								
									
192 feeder/trackers/strongsort/sort/tracker.py Normal file
@@ -0,0 +1,192 @@
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from . import detection
from .track import Track


class Tracker:
    """
    This is the multi-target tracker.

    Parameters
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        A distance metric for measurement-to-track association.
    max_age : int
        Maximum number of consecutive misses before a track is deleted.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.

    Attributes
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        The distance metric used for measurement-to-track association.
    max_age : int
        Maximum number of consecutive misses before a track is deleted.
    n_init : int
        Number of frames that a track remains in the initialization phase.
    kf : kalman_filter.KalmanFilter
        A Kalman filter to filter target trajectories in image space.
    tracks : List[Track]
        The list of active tracks at the current time step.
    """
    GATING_THRESHOLD = np.sqrt(kalman_filter.chi2inv95[4])

    def __init__(self, metric, max_iou_dist=0.9, max_age=30, max_unmatched_preds=7, n_init=3, _lambda=0, ema_alpha=0.9, mc_lambda=0.995):
        self.metric = metric
        self.max_iou_dist = max_iou_dist
        self.max_age = max_age
        self.n_init = n_init
        self._lambda = _lambda
        self.ema_alpha = ema_alpha
        self.mc_lambda = mc_lambda
        self.max_unmatched_preds = max_unmatched_preds

        self.kf = kalman_filter.KalmanFilter()
        self.tracks = []
        self._next_id = 1

    def predict(self):
        """Propagate track state distributions one time step forward.

        This function should be called once every time step, before `update`.
        """
        for track in self.tracks:
            track.predict(self.kf)

    def increment_ages(self):
        for track in self.tracks:
            track.increment_age()
            track.mark_missed()

    def camera_update(self, previous_img, current_img):
        for track in self.tracks:
            track.camera_update(previous_img, current_img)

    def pred_n_update_all_tracks(self):
        """Predict all tracks, then update each one with its own predicted state.

        """
        self.predict()
        for t in self.tracks:
            if self.max_unmatched_preds != 0 and t.updates_wo_assignment < t.max_num_updates_wo_assignment:
                bbox = t.to_tlwh()
                t.update_kf(detection.to_xyah_ext(bbox))

    def update(self, detections, classes, confidences):
        """Perform measurement update and track management.

        Parameters
        ----------
        detections : List[deep_sort.detection.Detection]
            A list of detections at the current time step.

        """
        # Run matching cascade.
        matches, unmatched_tracks, unmatched_detections = \
            self._match(detections)

        # Update track set.
        for track_idx, detection_idx in matches:
            self.tracks[track_idx].update(
                detections[detection_idx], classes[detection_idx], confidences[detection_idx])
        for track_idx in unmatched_tracks:
            self.tracks[track_idx].mark_missed()
            if self.max_unmatched_preds != 0 and self.tracks[track_idx].updates_wo_assignment < self.tracks[track_idx].max_num_updates_wo_assignment:
                bbox = self.tracks[track_idx].to_tlwh()
                self.tracks[track_idx].update_kf(detection.to_xyah_ext(bbox))
        for detection_idx in unmatched_detections:
            self._initiate_track(detections[detection_idx], classes[detection_idx].item(), confidences[detection_idx].item())
        self.tracks = [t for t in self.tracks if not t.is_deleted()]

        # Update distance metric.
        active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
        features, targets = [], []
        for track in self.tracks:
            if not track.is_confirmed():
                continue
            features += track.features
            targets += [track.track_id for _ in track.features]
        self.metric.partial_fit(np.asarray(features), np.asarray(targets), active_targets)

    def _full_cost_metric(self, tracks, dets, track_indices, detection_indices):
        """
        This implements the full lambda-based cost metric. However, in doing so, it
        disregards the position-only gating provided by
        linear_assignment.gate_cost_matrix() and instead gates on the full state.
        Note that the Mahalanobis distance is itself an unnormalised metric. Since the
        cosine distance is normalised, a quick-and-dirty normalisation is applied here
        based on the threshold: the positional cost is divided by the gating
        threshold, ensuring that valid values range from 0 to 1.
        Note also that the authors work with the squared distance; the square root is
        taken here so that the values are more intuitive.
        """
        # First compute the position-based cost matrix.
        pos_cost = np.empty([len(track_indices), len(detection_indices)])
        msrs = np.asarray([dets[i].to_xyah() for i in detection_indices])
        for row, track_idx in enumerate(track_indices):
            pos_cost[row, :] = np.sqrt(
                self.kf.gating_distance(
                    tracks[track_idx].mean, tracks[track_idx].covariance, msrs, False
                )
            ) / self.GATING_THRESHOLD
        pos_gate = pos_cost > 1.0
        # Now compute the appearance-based cost matrix.
        app_cost = self.metric.distance(
            np.array([dets[i].feature for i in detection_indices]),
            np.array([tracks[i].track_id for i in track_indices]),
        )
        app_gate = app_cost > self.metric.matching_threshold
        # Now combine and threshold.
        cost_matrix = self._lambda * pos_cost + (1 - self._lambda) * app_cost
        cost_matrix[np.logical_or(pos_gate, app_gate)] = linear_assignment.INFTY_COST
        # Return the fused matrix.
        return cost_matrix

    def _match(self, detections):

        def gated_metric(tracks, dets, track_indices, detection_indices):
            features = np.array([dets[i].feature for i in detection_indices])
            targets = np.array([tracks[i].track_id for i in track_indices])
            cost_matrix = self.metric.distance(features, targets)
            cost_matrix = linear_assignment.gate_cost_matrix(cost_matrix, tracks, dets, track_indices, detection_indices, self.mc_lambda)

            return cost_matrix

        # Split track set into confirmed and unconfirmed tracks.
        confirmed_tracks = [
            i for i, t in enumerate(self.tracks) if t.is_confirmed()]
        unconfirmed_tracks = [
            i for i, t in enumerate(self.tracks) if not t.is_confirmed()]

        # Associate confirmed tracks using appearance features.
        matches_a, unmatched_tracks_a, unmatched_detections = \
            linear_assignment.matching_cascade(
                gated_metric, self.metric.matching_threshold, self.max_age,
                self.tracks, detections, confirmed_tracks)

        # Associate remaining tracks together with unconfirmed tracks using IOU.
        iou_track_candidates = unconfirmed_tracks + [
            k for k in unmatched_tracks_a if
            self.tracks[k].time_since_update == 1]
        unmatched_tracks_a = [
            k for k in unmatched_tracks_a if
            self.tracks[k].time_since_update != 1]
        matches_b, unmatched_tracks_b, unmatched_detections = \
            linear_assignment.min_cost_matching(
                iou_matching.iou_cost, self.max_iou_dist, self.tracks,
                detections, iou_track_candidates, unmatched_detections)

        matches = matches_a + matches_b
        unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
        return matches, unmatched_tracks, unmatched_detections

    def _initiate_track(self, detection, class_id, conf):
        self.tracks.append(Track(
            detection.to_xyah(), self._next_id, class_id, conf, self.n_init, self.max_age, self.ema_alpha,
            detection.feature))
        self._next_id += 1
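A single-frame sketch of the driver sequence (`StrongSORT.update` in the next file wraps exactly this predict/update pair); the box values and the 128-d embedding are fabricated, and the import paths mirror strong_sort.py's:

import numpy as np
import torch
from sort.nn_matching import NearestNeighborDistanceMetric
from sort.detection import Detection
from sort.tracker import Tracker

metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2, budget=100)
tracker = Tracker(metric, max_iou_dist=0.7, max_age=30, n_init=3)

det = Detection(np.array([100., 120., 80., 180.]),           # tlwh box
                0.9, np.random.rand(128).astype(np.float32))
tracker.predict()                                            # propagate states
tracker.update([det], torch.tensor([0.]), torch.tensor([0.9]))
print(len(tracker.tracks))  # 1, tentative until n_init consecutive hits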
							
								
								
									
151 feeder/trackers/strongsort/strong_sort.py Normal file
@@ -0,0 +1,151 @@
import numpy as np
import torch
import sys
import cv2
import gdown
from os.path import exists as file_exists, join
import torchvision.transforms as transforms

from sort.nn_matching import NearestNeighborDistanceMetric
from sort.detection import Detection
from sort.tracker import Tracker

from reid_multibackend import ReIDDetectMultiBackend

from ultralytics.yolo.utils.ops import xyxy2xywh


class StrongSORT(object):
    def __init__(self,
                 model_weights,
                 device,
                 fp16,
                 max_dist=0.2,
                 max_iou_dist=0.7,
                 max_age=70,
                 max_unmatched_preds=7,
                 n_init=3,
                 nn_budget=100,
                 mc_lambda=0.995,
                 ema_alpha=0.9
                ):

        self.model = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16)

        self.max_dist = max_dist
        metric = NearestNeighborDistanceMetric(
            "cosine", self.max_dist, nn_budget)
        self.tracker = Tracker(
            metric, max_iou_dist=max_iou_dist, max_age=max_age, n_init=n_init, max_unmatched_preds=max_unmatched_preds, mc_lambda=mc_lambda, ema_alpha=ema_alpha)

    def update(self, dets, ori_img):

        xyxys = dets[:, 0:4]
        confs = dets[:, 4]
        clss = dets[:, 5]

        classes = clss.numpy()
        xywhs = xyxy2xywh(xyxys.numpy())
        confs = confs.numpy()
        self.height, self.width = ori_img.shape[:2]

        # generate detections
        features = self._get_features(xywhs, ori_img)
        bbox_tlwh = self._xywh_to_tlwh(xywhs)
        detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(
            confs)]

        # boxes and scores (inputs for non-maximum suppression)
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])

        # update tracker
        self.tracker.predict()
        self.tracker.update(detections, clss, confs)

        # output bbox identities
        outputs = []
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue

            box = track.to_tlwh()
            x1, y1, x2, y2 = self._tlwh_to_xyxy(box)

            track_id = track.track_id
            class_id = track.class_id
            conf = track.conf
            queue = track.q
            outputs.append(np.array([x1, y1, x2, y2, track_id, class_id, conf, queue], dtype=object))
        if len(outputs) > 0:
            outputs = np.stack(outputs, axis=0)
        return outputs

    """
    TODO:
        Convert bbox from xc_yc_w_h to xtl_ytl_w_h
    Thanks JieChen91@github.com for reporting this bug!
    """
    @staticmethod
    def _xywh_to_tlwh(bbox_xywh):
        if isinstance(bbox_xywh, np.ndarray):
            bbox_tlwh = bbox_xywh.copy()
        elif isinstance(bbox_xywh, torch.Tensor):
            bbox_tlwh = bbox_xywh.clone()
        bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
        bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
        return bbox_tlwh

    def _xywh_to_xyxy(self, bbox_xywh):
        x, y, w, h = bbox_xywh
        x1 = max(int(x - w / 2), 0)
        x2 = min(int(x + w / 2), self.width - 1)
        y1 = max(int(y - h / 2), 0)
        y2 = min(int(y + h / 2), self.height - 1)
        return x1, y1, x2, y2

    def _tlwh_to_xyxy(self, bbox_tlwh):
        """
        TODO:
            Convert bbox from xtl_ytl_w_h to xc_yc_w_h
        Thanks JieChen91@github.com for reporting this bug!
        """
        x, y, w, h = bbox_tlwh
        x1 = max(int(x), 0)
        x2 = min(int(x + w), self.width - 1)
        y1 = max(int(y), 0)
        y2 = min(int(y + h), self.height - 1)
        return x1, y1, x2, y2

    def increment_ages(self):
        self.tracker.increment_ages()

    def _xyxy_to_tlwh(self, bbox_xyxy):
        x1, y1, x2, y2 = bbox_xyxy

        t = x1
        l = y1
        w = int(x2 - x1)
        h = int(y2 - y1)
        return t, l, w, h

    def _get_features(self, bbox_xywh, ori_img):
        im_crops = []
        for box in bbox_xywh:
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            im_crops.append(im)
        if im_crops:
            features = self.model(im_crops)
        else:
            features = np.array([])
        return features

    def trajectory(self, im0, q, color):
        # Draw the track trajectory as circles; thickness grows with recency,
        # and white marks pure-prediction (unmatched) updates.
        for i, p in enumerate(q):
            thickness = int(np.sqrt(float(i + 1)) * 1.5)
            if p[0] == 'observationupdate':
                cv2.circle(im0, p[1], 2, color=color, thickness=thickness)
            else:
                cv2.circle(im0, p[1], 2, color=(255, 255, 255), thickness=thickness)
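A minimal end-to-end sketch: one fabricated detection on a noise frame. The ReID weight name is a placeholder for whatever `ReIDDetectMultiBackend` accepts, and the output stays empty for the first frames until a track is confirmed:

import numpy as np
import torch

tracker = StrongSORT(model_weights='osnet_x0_25_msmt17.pt',  # placeholder
                     device='cpu', fp16=False)

frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
dets = torch.tensor([[100., 120., 180., 300., 0.88, 0.]])    # x1 y1 x2 y2 conf cls

outputs = tracker.update(dets, frame)
# once confirmed, each row is [x1, y1, x2, y2, track_id, class_id, conf, queue]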
							
								
								
									
										0
									
								
								feeder/trackers/strongsort/utils/__init__.py
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										0
									
								
								feeder/trackers/strongsort/utils/__init__.py
									
										
									
									
									
										Normal file
									
								
							
							
								
								
									
										13
									
								
								feeder/trackers/strongsort/utils/asserts.py
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										13
									
								
								feeder/trackers/strongsort/utils/asserts.py
									
										
									
									
									
										Normal file
									
								
							| 
						 | 
				
			
			@ -0,0 +1,13 @@
 | 
			
		|||
from os import environ


def assert_in(file, files_to_check):
    if file not in files_to_check:
        raise AssertionError("{} does not exist in the list".format(str(file)))
    return True


def assert_in_env(check_list: list):
    for item in check_list:
        assert_in(item, environ.keys())
    return True
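
Usage sketch for assert_in_env (the environment variable name is made up for the demo):

from os import environ

environ.setdefault('MODEL_PATH', '/models/osnet.pt')  # hypothetical variable
assert_in_env(['MODEL_PATH'])  # returns True; raises AssertionError if the variable is missing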

36 feeder/trackers/strongsort/utils/draw.py Normal file
@@ -0,0 +1,36 @@
import numpy as np
import cv2

palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)


def compute_color_for_labels(label):
    """
    Simple function that returns a fixed color depending on the class label.
    """
    color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
    return tuple(color)


def draw_boxes(img, bbox, identities=None, offset=(0, 0)):
    for i, box in enumerate(bbox):
        x1, y1, x2, y2 = [int(c) for c in box]
        x1 += offset[0]
        x2 += offset[0]
        y1 += offset[1]
        y2 += offset[1]
        # box text and bar
        id = int(identities[i]) if identities is not None else 0
        color = compute_color_for_labels(id)
        label = '{}{:d}'.format("", id)
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
        cv2.rectangle(img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
        cv2.putText(img, label, (x1, y1 + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
    return img


if __name__ == '__main__':
    for i in range(82):
        print(compute_color_for_labels(i))
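
Usage sketch for draw_boxes with synthetic boxes (all shapes and values are made up):

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # blank test image
frame = draw_boxes(frame, [(50, 60, 200, 220), (300, 100, 420, 260)], identities=[1, 2])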

103 feeder/trackers/strongsort/utils/evaluation.py Normal file
@@ -0,0 +1,103 @@
import os
import numpy as np
import copy
import motmetrics as mm
mm.lap.default_solver = 'lap'
from utils.io import read_results, unzip_objs


class Evaluator(object):

    def __init__(self, data_root, seq_name, data_type):
        self.data_root = data_root
        self.seq_name = seq_name
        self.data_type = data_type

        self.load_annotations()
        self.reset_accumulator()

    def load_annotations(self):
        assert self.data_type == 'mot'

        gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
        self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
        self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)

    def reset_accumulator(self):
        self.acc = mm.MOTAccumulator(auto_id=True)

    def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
        # results
        trk_tlwhs = np.copy(trk_tlwhs)
        trk_ids = np.copy(trk_ids)

        # gts
        gt_objs = self.gt_frame_dict.get(frame_id, [])
        gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]

        # ignore boxes
        ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
        ignore_tlwhs = unzip_objs(ignore_objs)[0]

        # remove ignored results
        keep = np.ones(len(trk_tlwhs), dtype=bool)
        iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
        if len(iou_distance) > 0:
            match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
            match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
            match_ious = iou_distance[match_is, match_js]

            match_js = np.asarray(match_js, dtype=int)
            match_js = match_js[np.logical_not(np.isnan(match_ious))]
            keep[match_js] = False
            trk_tlwhs = trk_tlwhs[keep]
            trk_ids = trk_ids[keep]

        # get distance matrix
        iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)

        # acc
        self.acc.update(gt_ids, trk_ids, iou_distance)

        if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
            events = self.acc.last_mot_events  # only supported by https://github.com/longcw/py-motmetrics
        else:
            events = None
        return events

    def eval_file(self, filename):
        self.reset_accumulator()

        result_frame_dict = read_results(filename, self.data_type, is_gt=False)
        frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
        for frame_id in frames:
            trk_objs = result_frame_dict.get(frame_id, [])
            trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
            self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)

        return self.acc

    @staticmethod
    def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
        names = copy.deepcopy(names)
        if metrics is None:
            metrics = mm.metrics.motchallenge_metrics
        metrics = copy.deepcopy(metrics)

        mh = mm.metrics.create()
        summary = mh.compute_many(
            accs,
            metrics=metrics,
            names=names,
            generate_overall=True
        )

        return summary

    @staticmethod
    def save_summary(summary, filename):
        import pandas as pd
        writer = pd.ExcelWriter(filename)
        summary.to_excel(writer)
        writer.save()
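
Usage sketch for Evaluator, assuming a MOT-style layout <data_root>/<seq_name>/gt/gt.txt and a result file in MOT text format (both paths here are hypothetical):

evaluator = Evaluator(data_root='data/MOT16/train', seq_name='MOT16-02', data_type='mot')
acc = evaluator.eval_file('results/MOT16-02.txt')
summary = Evaluator.get_summary([acc], ['MOT16-02'])
print(summary)  # one row per sequence plus an OVERALL row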

133 feeder/trackers/strongsort/utils/io.py Normal file
@@ -0,0 +1,133 @@
import os
from typing import Dict
import numpy as np

# from utils.log import get_logger


def write_results(filename, results, data_type):
    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)

    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                frame_id -= 1
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
                f.write(line)


# def write_results(filename, results_dict: Dict, data_type: str):
#     if not filename:
#         return
#     path = os.path.dirname(filename)
#     if not os.path.exists(path):
#         os.makedirs(path)

#     if data_type in ('mot', 'mcmot', 'lab'):
#         save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
#     elif data_type == 'kitti':
#         save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
#     else:
#         raise ValueError(data_type)

#     with open(filename, 'w') as f:
#         for frame_id, frame_data in results_dict.items():
#             if data_type == 'kitti':
#                 frame_id -= 1
#             for tlwh, track_id in frame_data:
#                 if track_id < 0:
#                     continue
#                 x1, y1, w, h = tlwh
#                 x2, y2 = x1 + w, y1 + h
#                 line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
#                 f.write(line)
#     logger.info('Save results to {}'.format(filename))


def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
    if data_type in ('mot', 'lab'):
        read_fun = read_mot_results
    else:
        raise ValueError('Unknown data type: {}'.format(data_type))

    return read_fun(filename, is_gt, is_ignore)


"""
labels={'ped', ...			% 1
'person_on_vhcl', ...	% 2
'car', ...				% 3
'bicycle', ...			% 4
'mbike', ...			% 5
'non_mot_vhcl', ...		% 6
'static_person', ...	% 7
'distractor', ...		% 8
'occluder', ...			% 9
'occluder_on_grnd', ...	% 10
'occluder_full', ...	% 11
'reflection', ...		% 12
'crowd' ...				% 13
};
"""


def read_mot_results(filename, is_gt, is_ignore):
    valid_labels = {1}
    ignore_labels = {2, 7, 8, 12}
    results_dict = dict()
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            for line in f.readlines():
                linelist = line.split(',')
                if len(linelist) < 7:
                    continue
                fid = int(linelist[0])
                if fid < 1:
                    continue
                results_dict.setdefault(fid, list())

                if is_gt:
                    if 'MOT16-' in filename or 'MOT17-' in filename:
                        label = int(float(linelist[7]))
                        mark = int(float(linelist[6]))
                        if mark == 0 or label not in valid_labels:
                            continue
                    score = 1
                elif is_ignore:
                    if 'MOT16-' in filename or 'MOT17-' in filename:
                        label = int(float(linelist[7]))
                        vis_ratio = float(linelist[8])
                        if label not in ignore_labels and vis_ratio >= 0:
                            continue
                    else:
                        continue
                    score = 1
                else:
                    score = float(linelist[6])

                tlwh = tuple(map(float, linelist[2:6]))
                target_id = int(linelist[1])

                results_dict[fid].append((tlwh, target_id, score))

    return results_dict


def unzip_objs(objs):
    if len(objs) > 0:
        tlwhs, ids, scores = zip(*objs)
    else:
        tlwhs, ids, scores = [], [], []
    tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)

    return tlwhs, ids, scores
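
Round-trip sketch for the MOT text format (the file name and box values are made up):

results = [(1, [(100.0, 200.0, 50.0, 80.0)], [7])]  # one frame: (frame_id, tlwhs, track_ids)
write_results('demo_results.txt', results, 'mot')
parsed = read_results('demo_results.txt', 'mot')
print(parsed)  # {1: [((100.0, 200.0, 50.0, 80.0), 7, -1.0)]} -- the score column is -1 for plain results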

383 feeder/trackers/strongsort/utils/json_logger.py Normal file
@@ -0,0 +1,383 @@
"""
 | 
			
		||||
References:
 | 
			
		||||
    https://medium.com/analytics-vidhya/creating-a-custom-logging-mechanism-for-real-time-object-detection-using-tdd-4ca2cfcd0a2f
 | 
			
		||||
"""
 | 
			
		||||
import json
 | 
			
		||||
from os import makedirs
 | 
			
		||||
from os.path import exists, join
 | 
			
		||||
from datetime import datetime
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class JsonMeta(object):
 | 
			
		||||
    HOURS = 3
 | 
			
		||||
    MINUTES = 59
 | 
			
		||||
    SECONDS = 59
 | 
			
		||||
    PATH_TO_SAVE = 'LOGS'
 | 
			
		||||
    DEFAULT_FILE_NAME = 'remaining'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BaseJsonLogger(object):
 | 
			
		||||
    """
 | 
			
		||||
    This is the base class that returns __dict__ of its own
 | 
			
		||||
    it also returns the dicts of objects in the attributes that are list instances
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def dic(self):
 | 
			
		||||
        # returns dicts of objects
 | 
			
		||||
        out = {}
 | 
			
		||||
        for k, v in self.__dict__.items():
 | 
			
		||||
            if hasattr(v, 'dic'):
 | 
			
		||||
                out[k] = v.dic()
 | 
			
		||||
            elif isinstance(v, list):
 | 
			
		||||
                out[k] = self.list(v)
 | 
			
		||||
            else:
 | 
			
		||||
                out[k] = v
 | 
			
		||||
        return out
 | 
			
		||||
 | 
			
		||||
    @staticmethod
 | 
			
		||||
    def list(values):
 | 
			
		||||
        # applies the dic method on items in the list
 | 
			
		||||
        return [v.dic() if hasattr(v, 'dic') else v for v in values]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Label(BaseJsonLogger):
 | 
			
		||||
    """
 | 
			
		||||
    For each bounding box there are various categories with confidences. Label class keeps track of that information.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, category: str, confidence: float):
 | 
			
		||||
        self.category = category
 | 
			
		||||
        self.confidence = confidence
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Bbox(BaseJsonLogger):
 | 
			
		||||
    """
 | 
			
		||||
    This module stores the information for each frame and use them in JsonParser
 | 
			
		||||
    Attributes:
 | 
			
		||||
        labels (list): List of label module.
 | 
			
		||||
        top (int):
 | 
			
		||||
        left (int):
 | 
			
		||||
        width (int):
 | 
			
		||||
        height (int):
 | 
			
		||||
 | 
			
		||||
    Args:
 | 
			
		||||
        bbox_id (float):
 | 
			
		||||
        top (int):
 | 
			
		||||
        left (int):
 | 
			
		||||
        width (int):
 | 
			
		||||
        height (int):
 | 
			
		||||
 | 
			
		||||
    References:
 | 
			
		||||
        Check Label module for better understanding.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, bbox_id, top, left, width, height):
 | 
			
		||||
        self.labels = []
 | 
			
		||||
        self.bbox_id = bbox_id
 | 
			
		||||
        self.top = top
 | 
			
		||||
        self.left = left
 | 
			
		||||
        self.width = width
 | 
			
		||||
        self.height = height
 | 
			
		||||
 | 
			
		||||
    def add_label(self, category, confidence):
 | 
			
		||||
        # adds category and confidence only if top_k is not exceeded.
 | 
			
		||||
        self.labels.append(Label(category, confidence))
 | 
			
		||||
 | 
			
		||||
    def labels_full(self, value):
 | 
			
		||||
        return len(self.labels) == value
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Frame(BaseJsonLogger):
 | 
			
		||||
    """
 | 
			
		||||
    This module stores the information for each frame and use them in JsonParser
 | 
			
		||||
    Attributes:
 | 
			
		||||
        timestamp (float): The elapsed time of captured frame
 | 
			
		||||
        frame_id (int): The frame number of the captured video
 | 
			
		||||
        bboxes (list of Bbox objects): Stores the list of bbox objects.
 | 
			
		||||
 | 
			
		||||
    References:
 | 
			
		||||
        Check Bbox class for better information
 | 
			
		||||
 | 
			
		||||
    Args:
 | 
			
		||||
        timestamp (float):
 | 
			
		||||
        frame_id (int):
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, frame_id: int, timestamp: float = None):
 | 
			
		||||
        self.frame_id = frame_id
 | 
			
		||||
        self.timestamp = timestamp
 | 
			
		||||
        self.bboxes = []
 | 
			
		||||
 | 
			
		||||
    def add_bbox(self, bbox_id: int, top: int, left: int, width: int, height: int):
 | 
			
		||||
        bboxes_ids = [bbox.bbox_id for bbox in self.bboxes]
 | 
			
		||||
        if bbox_id not in bboxes_ids:
 | 
			
		||||
            self.bboxes.append(Bbox(bbox_id, top, left, width, height))
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError("Frame with id: {} already has a Bbox with id: {}".format(self.frame_id, bbox_id))
 | 
			
		||||
 | 
			
		||||
    def add_label_to_bbox(self, bbox_id: int, category: str, confidence: float):
 | 
			
		||||
        bboxes = {bbox.id: bbox for bbox in self.bboxes}
 | 
			
		||||
        if bbox_id in bboxes.keys():
 | 
			
		||||
            res = bboxes.get(bbox_id)
 | 
			
		||||
            res.add_label(category, confidence)
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError('the bbox with id: {} does not exists!'.format(bbox_id))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BboxToJsonLogger(BaseJsonLogger):
 | 
			
		||||
    """
 | 
			
		||||
    ُ This module is designed to automate the task of logging jsons. An example json is used
 | 
			
		||||
    to show the contents of json file shortly
 | 
			
		||||
    Example:
 | 
			
		||||
          {
 | 
			
		||||
          "video_details": {
 | 
			
		||||
            "frame_width": 1920,
 | 
			
		||||
            "frame_height": 1080,
 | 
			
		||||
            "frame_rate": 20,
 | 
			
		||||
            "video_name": "/home/gpu/codes/MSD/pedestrian_2/project/public/camera1.avi"
 | 
			
		||||
          },
 | 
			
		||||
          "frames": [
 | 
			
		||||
            {
 | 
			
		||||
              "frame_id": 329,
 | 
			
		||||
              "timestamp": 3365.1254
 | 
			
		||||
              "bboxes": [
 | 
			
		||||
                {
 | 
			
		||||
                  "labels": [
 | 
			
		||||
                    {
 | 
			
		||||
                      "category": "pedestrian",
 | 
			
		||||
                      "confidence": 0.9
 | 
			
		||||
                    }
 | 
			
		||||
                  ],
 | 
			
		||||
                  "bbox_id": 0,
 | 
			
		||||
                  "top": 1257,
 | 
			
		||||
                  "left": 138,
 | 
			
		||||
                  "width": 68,
 | 
			
		||||
                  "height": 109
 | 
			
		||||
                }
 | 
			
		||||
              ]
 | 
			
		||||
            }],
 | 
			
		||||
 | 
			
		||||
    Attributes:
 | 
			
		||||
        frames (dict): It's a dictionary that maps each frame_id to json attributes.
 | 
			
		||||
        video_details (dict): information about video file.
 | 
			
		||||
        top_k_labels (int): shows the allowed number of labels
 | 
			
		||||
        start_time (datetime object): we use it to automate the json output by time.
 | 
			
		||||
 | 
			
		||||
    Args:
 | 
			
		||||
        top_k_labels (int): shows the allowed number of labels
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, top_k_labels: int = 1):
 | 
			
		||||
        self.frames = {}
 | 
			
		||||
        self.video_details = self.video_details = dict(frame_width=None, frame_height=None, frame_rate=None,
 | 
			
		||||
                                                       video_name=None)
 | 
			
		||||
        self.top_k_labels = top_k_labels
 | 
			
		||||
        self.start_time = datetime.now()
 | 
			
		||||
 | 
			
		||||
    def set_top_k(self, value):
 | 
			
		||||
        self.top_k_labels = value
 | 
			
		||||
 | 
			
		||||
    def frame_exists(self, frame_id: int) -> bool:
 | 
			
		||||
        """
 | 
			
		||||
        Args:
 | 
			
		||||
            frame_id (int):
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            bool: true if frame_id is recognized
 | 
			
		||||
        """
 | 
			
		||||
        return frame_id in self.frames.keys()
 | 
			
		||||
 | 
			
		||||
    def add_frame(self, frame_id: int, timestamp: float = None) -> None:
 | 
			
		||||
        """
 | 
			
		||||
        Args:
 | 
			
		||||
            frame_id (int):
 | 
			
		||||
            timestamp (float): opencv captured frame time property
 | 
			
		||||
 | 
			
		||||
        Raises:
 | 
			
		||||
             ValueError: if frame_id would not exist in class frames attribute
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            None
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
        if not self.frame_exists(frame_id):
 | 
			
		||||
            self.frames[frame_id] = Frame(frame_id, timestamp)
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError("Frame id: {} already exists".format(frame_id))
 | 
			
		||||
 | 
			
		||||
    def bbox_exists(self, frame_id: int, bbox_id: int) -> bool:
 | 
			
		||||
        """
 | 
			
		||||
        Args:
 | 
			
		||||
            frame_id:
 | 
			
		||||
            bbox_id:
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            bool: if bbox exists in frame bboxes list
 | 
			
		||||
        """
 | 
			
		||||
        bboxes = []
 | 
			
		||||
        if self.frame_exists(frame_id=frame_id):
 | 
			
		||||
            bboxes = [bbox.bbox_id for bbox in self.frames[frame_id].bboxes]
 | 
			
		||||
        return bbox_id in bboxes
 | 
			
		||||
 | 
			
		||||
    def find_bbox(self, frame_id: int, bbox_id: int):
 | 
			
		||||
        """
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            frame_id:
 | 
			
		||||
            bbox_id:
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            bbox_id (int):
 | 
			
		||||
 | 
			
		||||
        Raises:
 | 
			
		||||
            ValueError: if bbox_id does not exist in the bbox list of specific frame.
 | 
			
		||||
        """
 | 
			
		||||
        if not self.bbox_exists(frame_id, bbox_id):
 | 
			
		||||
            raise ValueError("frame with id: {} does not contain bbox with id: {}".format(frame_id, bbox_id))
 | 
			
		||||
        bboxes = {bbox.bbox_id: bbox for bbox in self.frames[frame_id].bboxes}
 | 
			
		||||
        return bboxes.get(bbox_id)
 | 
			
		||||
 | 
			
		||||
    def add_bbox_to_frame(self, frame_id: int, bbox_id: int, top: int, left: int, width: int, height: int) -> None:
 | 
			
		||||
        """
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            frame_id (int):
 | 
			
		||||
            bbox_id (int):
 | 
			
		||||
            top (int):
 | 
			
		||||
            left (int):
 | 
			
		||||
            width (int):
 | 
			
		||||
            height (int):
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            None
 | 
			
		||||
 | 
			
		||||
        Raises:
 | 
			
		||||
            ValueError: if bbox_id already exist in frame information with frame_id
 | 
			
		||||
            ValueError: if frame_id does not exist in frames attribute
 | 
			
		||||
        """
 | 
			
		||||
        if self.frame_exists(frame_id):
 | 
			
		||||
            frame = self.frames[frame_id]
 | 
			
		||||
            if not self.bbox_exists(frame_id, bbox_id):
 | 
			
		||||
                frame.add_bbox(bbox_id, top, left, width, height)
 | 
			
		||||
            else:
 | 
			
		||||
                raise ValueError(
 | 
			
		||||
                    "frame with frame_id: {} already contains the bbox with id: {} ".format(frame_id, bbox_id))
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError("frame with frame_id: {} does not exist".format(frame_id))
 | 
			
		||||
 | 
			
		||||
    def add_label_to_bbox(self, frame_id: int, bbox_id: int, category: str, confidence: float):
 | 
			
		||||
        """
 | 
			
		||||
        Args:
 | 
			
		||||
            frame_id:
 | 
			
		||||
            bbox_id:
 | 
			
		||||
            category:
 | 
			
		||||
            confidence: the confidence value returned from yolo detection
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            None
 | 
			
		||||
 | 
			
		||||
        Raises:
 | 
			
		||||
            ValueError: if labels quota (top_k_labels) exceeds.
 | 
			
		||||
        """
 | 
			
		||||
        bbox = self.find_bbox(frame_id, bbox_id)
 | 
			
		||||
        if not bbox.labels_full(self.top_k_labels):
 | 
			
		||||
            bbox.add_label(category, confidence)
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError("labels in frame_id: {}, bbox_id: {} is fulled".format(frame_id, bbox_id))
 | 
			
		||||
 | 
			
		||||
    def add_video_details(self, frame_width: int = None, frame_height: int = None, frame_rate: int = None,
 | 
			
		||||
                          video_name: str = None):
 | 
			
		||||
        self.video_details['frame_width'] = frame_width
 | 
			
		||||
        self.video_details['frame_height'] = frame_height
 | 
			
		||||
        self.video_details['frame_rate'] = frame_rate
 | 
			
		||||
        self.video_details['video_name'] = video_name
 | 
			
		||||
 | 
			
		||||
    def output(self):
 | 
			
		||||
        output = {'video_details': self.video_details}
 | 
			
		||||
        result = list(self.frames.values())
 | 
			
		||||
        output['frames'] = [item.dic() for item in result]
 | 
			
		||||
        return output
 | 
			
		||||
 | 
			
		||||
    def json_output(self, output_name):
 | 
			
		||||
        """
 | 
			
		||||
        Args:
 | 
			
		||||
            output_name:
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            None
 | 
			
		||||
 | 
			
		||||
        Notes:
 | 
			
		||||
            It creates the json output with `output_name` name.
 | 
			
		||||
        """
 | 
			
		||||
        if not output_name.endswith('.json'):
 | 
			
		||||
            output_name += '.json'
 | 
			
		||||
        with open(output_name, 'w') as file:
 | 
			
		||||
            json.dump(self.output(), file)
 | 
			
		||||
        file.close()
 | 
			
		||||
 | 
			
		||||
    def set_start(self):
 | 
			
		||||
        self.start_time = datetime.now()
 | 
			
		||||
 | 
			
		||||
    def schedule_output_by_time(self, output_dir=JsonMeta.PATH_TO_SAVE, hours: int = 0, minutes: int = 0,
 | 
			
		||||
                                seconds: int = 60) -> None:
 | 
			
		||||
        """
 | 
			
		||||
        Notes:
 | 
			
		||||
            Creates folder and then periodically stores the jsons on that address.
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            output_dir (str): the directory where output files will be stored
 | 
			
		||||
            hours (int):
 | 
			
		||||
            minutes (int):
 | 
			
		||||
            seconds (int):
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            None
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
        end = datetime.now()
 | 
			
		||||
        interval = 0
 | 
			
		||||
        interval += abs(min([hours, JsonMeta.HOURS]) * 3600)
 | 
			
		||||
        interval += abs(min([minutes, JsonMeta.MINUTES]) * 60)
 | 
			
		||||
        interval += abs(min([seconds, JsonMeta.SECONDS]))
 | 
			
		||||
        diff = (end - self.start_time).seconds
 | 
			
		||||
 | 
			
		||||
        if diff > interval:
 | 
			
		||||
            output_name = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '.json'
 | 
			
		||||
            if not exists(output_dir):
 | 
			
		||||
                makedirs(output_dir)
 | 
			
		||||
            output = join(output_dir, output_name)
 | 
			
		||||
            self.json_output(output_name=output)
 | 
			
		||||
            self.frames = {}
 | 
			
		||||
            self.start_time = datetime.now()
 | 
			
		||||
 | 
			
		||||
    def schedule_output_by_frames(self, frames_quota, frame_counter, output_dir=JsonMeta.PATH_TO_SAVE):
 | 
			
		||||
        """
 | 
			
		||||
        saves as the number of frames quota increases higher.
 | 
			
		||||
        :param frames_quota:
 | 
			
		||||
        :param frame_counter:
 | 
			
		||||
        :param output_dir:
 | 
			
		||||
        :return:
 | 
			
		||||
        """
 | 
			
		||||
        pass
 | 
			
		||||
 | 
			
		||||
    def flush(self, output_dir):
 | 
			
		||||
        """
 | 
			
		||||
        Notes:
 | 
			
		||||
            We use this function to output jsons whenever possible.
 | 
			
		||||
            like the time that we exit the while loop of opencv.
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            output_dir:
 | 
			
		||||
 | 
			
		||||
        Returns:
 | 
			
		||||
            None
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
        filename = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '-remaining.json'
 | 
			
		||||
        output = join(output_dir, filename)
 | 
			
		||||
        self.json_output(output_name=output)
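
Usage sketch for BboxToJsonLogger (all values are made up; the output layout matches the docstring example):

logger = BboxToJsonLogger(top_k_labels=1)
logger.add_video_details(frame_width=1920, frame_height=1080, frame_rate=20, video_name='camera1.avi')
logger.add_frame(frame_id=1, timestamp=0.05)
logger.add_bbox_to_frame(frame_id=1, bbox_id=0, top=1257, left=138, width=68, height=109)
logger.add_label_to_bbox(frame_id=1, bbox_id=0, category='pedestrian', confidence=0.9)
logger.json_output('demo_log')  # writes demo_log.json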

17 feeder/trackers/strongsort/utils/log.py Normal file
@@ -0,0 +1,17 @@
import logging


def get_logger(name='root'):
    formatter = logging.Formatter(
        # fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
        fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')

    handler = logging.StreamHandler()
    handler.setFormatter(formatter)

    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    return logger

41 feeder/trackers/strongsort/utils/parser.py Normal file
@@ -0,0 +1,41 @@
import os
import yaml
from easydict import EasyDict as edict


class YamlParser(edict):
    """
    This is a yaml parser based on EasyDict.
    """

    def __init__(self, cfg_dict=None, config_file=None):
        if cfg_dict is None:
            cfg_dict = {}

        if config_file is not None:
            assert(os.path.isfile(config_file))
            with open(config_file, 'r') as fo:
                yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
                cfg_dict.update(yaml_)

        super(YamlParser, self).__init__(cfg_dict)

    def merge_from_file(self, config_file):
        with open(config_file, 'r') as fo:
            yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
            self.update(yaml_)

    def merge_from_dict(self, config_dict):
        self.update(config_dict)


def get_config(config_file=None):
    return YamlParser(config_file=config_file)


if __name__ == "__main__":
    cfg = YamlParser(config_file="../configs/yolov3.yaml")
    cfg.merge_from_file("../configs/strong_sort.yaml")

    import ipdb
    ipdb.set_trace()
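
Usage sketch for get_config (the yaml path mirrors the __main__ demo above and is assumed to exist; the override key is hypothetical):

cfg = get_config('configs/strong_sort.yaml')  # assumed path
cfg.merge_from_dict({'max_age': 30})          # update() overrides top-level entries
print(cfg.max_age)                            # attribute access via EasyDict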

39 feeder/trackers/strongsort/utils/tools.py Normal file
@@ -0,0 +1,39 @@
from functools import wraps
from time import time


def is_video(ext: str):
    """
    Returns True if ext exists in
    allowed_exts for video files.

    Args:
        ext:

    Returns:

    """

    allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp')
    return any((ext.endswith(x) for x in allowed_exts))


def tik_tok(func):
    """
    keeps track of time for each process.
    Args:
        func:

    Returns:

    """
    @wraps(func)
    def _time_it(*args, **kwargs):
        start = time()
        try:
            return func(*args, **kwargs)
        finally:
            end_ = time()
            print("time: {:.03f}s, fps: {:.03f}".format(end_ - start, 1 / (end_ - start)))

    return _time_it
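
Decorator usage sketch: wrap any per-frame routine to print its runtime and effective fps (the workload below is a placeholder):

@tik_tok
def process_frame(n):
    return sum(range(n))  # placeholder workload so the elapsed time is non-zero

process_frame(10 ** 6)  # prints e.g. "time: 0.015s, fps: 66.667"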

BIN feeder/video/sample.mp4 Normal file
Binary file not shown.

612 feeder/video/sample.mp4-strongsort.log Normal file
@@ -0,0 +1,612 @@
{"bbox": [1208, 574, 1312, 640], "id": 1, "cls": 2, "conf": 0.7392573952674866, "frame_idx": 2, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1206, 573, 1311, 639], "id": 1, "cls": 2, "conf": 0.7638279795646667, "frame_idx": 3, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1205, 573, 1310, 640], "id": 1, "cls": 2, "conf": 0.745888352394104, "frame_idx": 4, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1205, 572, 1310, 640], "id": 1, "cls": 2, "conf": 0.7273551821708679, "frame_idx": 5, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1204, 572, 1310, 641], "id": 1, "cls": 2, "conf": 0.7593294382095337, "frame_idx": 6, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1203, 571, 1309, 641], "id": 1, "cls": 2, "conf": 0.7566904425621033, "frame_idx": 7, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1202, 570, 1309, 642], "id": 1, "cls": 2, "conf": 0.7727674245834351, "frame_idx": 8, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1202, 570, 1308, 642], "id": 1, "cls": 2, "conf": 0.7940199375152588, "frame_idx": 9, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1200, 570, 1308, 642], "id": 1, "cls": 2, "conf": 0.7740529179573059, "frame_idx": 10, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1200, 570, 1308, 642], "id": 1, "cls": 2, "conf": 0.7652700543403625, "frame_idx": 11, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1201, 571, 1307, 642], "id": 1, "cls": 2, "conf": 0.8012721538543701, "frame_idx": 12, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1200, 570, 1309, 642], "id": 1, "cls": 2, "conf": 0.7976530194282532, "frame_idx": 13, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1199, 569, 1311, 643], "id": 1, "cls": 2, "conf": 0.812846302986145, "frame_idx": 14, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1198, 570, 1310, 643], "id": 1, "cls": 2, "conf": 0.8232163190841675, "frame_idx": 15, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1194, 569, 1309, 644], "id": 1, "cls": 2, "conf": 0.8198840022087097, "frame_idx": 16, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1195, 569, 1306, 643], "id": 1, "cls": 2, "conf": 0.7693840861320496, "frame_idx": 17, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1193, 569, 1305, 645], "id": 1, "cls": 2, "conf": 0.7881284356117249, "frame_idx": 18, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1192, 570, 1305, 645], "id": 1, "cls": 2, "conf": 0.8157638311386108, "frame_idx": 19, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1192, 570, 1305, 644], "id": 1, "cls": 2, "conf": 0.8246914744377136, "frame_idx": 20, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1190, 569, 1305, 645], "id": 1, "cls": 2, "conf": 0.828994631767273, "frame_idx": 21, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1190, 569, 1304, 644], "id": 1, "cls": 2, "conf": 0.8013927936553955, "frame_idx": 22, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1190, 568, 1303, 644], "id": 1, "cls": 2, "conf": 0.8276790380477905, "frame_idx": 23, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1188, 568, 1304, 645], "id": 1, "cls": 2, "conf": 0.8594380021095276, "frame_idx": 24, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1186, 568, 1304, 645], "id": 1, "cls": 2, "conf": 0.8706213235855103, "frame_idx": 25, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1187, 568, 1303, 644], "id": 1, "cls": 2, "conf": 0.8731331825256348, "frame_idx": 26, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1182, 568, 1303, 645], "id": 1, "cls": 2, "conf": 0.87749844789505, "frame_idx": 27, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1182, 569, 1302, 645], "id": 1, "cls": 2, "conf": 0.8746338486671448, "frame_idx": 28, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1181, 568, 1303, 646], "id": 1, "cls": 2, "conf": 0.8688514828681946, "frame_idx": 29, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1180, 569, 1301, 646], "id": 1, "cls": 2, "conf": 0.8689095973968506, "frame_idx": 30, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1179, 568, 1302, 647], "id": 1, "cls": 2, "conf": 0.8720865249633789, "frame_idx": 31, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1178, 568, 1301, 647], "id": 1, "cls": 2, "conf": 0.8609508275985718, "frame_idx": 32, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1177, 568, 1300, 647], "id": 1, "cls": 2, "conf": 0.8541733026504517, "frame_idx": 33, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1178, 569, 1299, 648], "id": 1, "cls": 2, "conf": 0.8305150270462036, "frame_idx": 34, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1177, 569, 1297, 647], "id": 1, "cls": 2, "conf": 0.8163544535636902, "frame_idx": 35, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1175, 568, 1298, 648], "id": 1, "cls": 2, "conf": 0.8103095293045044, "frame_idx": 36, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1174, 568, 1297, 648], "id": 1, "cls": 2, "conf": 0.8175411820411682, "frame_idx": 37, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1171, 569, 1297, 648], "id": 1, "cls": 2, "conf": 0.8210935592651367, "frame_idx": 38, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1171, 568, 1295, 648], "id": 1, "cls": 2, "conf": 0.8320956826210022, "frame_idx": 39, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1167, 568, 1294, 649], "id": 1, "cls": 2, "conf": 0.7790266275405884, "frame_idx": 40, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1166, 568, 1293, 648], "id": 1, "cls": 2, "conf": 0.7791686058044434, "frame_idx": 41, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1166, 568, 1292, 648], "id": 1, "cls": 2, "conf": 0.7617875933647156, "frame_idx": 42, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1164, 567, 1293, 649], "id": 1, "cls": 2, "conf": 0.7618439793586731, "frame_idx": 43, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1162, 567, 1293, 649], "id": 1, "cls": 2, "conf": 0.7654961347579956, "frame_idx": 44, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1161, 567, 1292, 649], "id": 1, "cls": 2, "conf": 0.7552655935287476, "frame_idx": 45, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1160, 568, 1290, 649], "id": 1, "cls": 2, "conf": 0.7659391164779663, "frame_idx": 46, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1158, 570, 1289, 650], "id": 1, "cls": 2, "conf": 0.7770782709121704, "frame_idx": 47, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1156, 569, 1290, 651], "id": 1, "cls": 2, "conf": 0.776265025138855, "frame_idx": 48, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1156, 568, 1289, 649], "id": 1, "cls": 2, "conf": 0.7784299850463867, "frame_idx": 49, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1153, 567, 1289, 650], "id": 1, "cls": 2, "conf": 0.7925119400024414, "frame_idx": 50, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1154, 568, 1290, 651], "id": 1, "cls": 2, "conf": 0.7904253005981445, "frame_idx": 51, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1152, 569, 1291, 651], "id": 1, "cls": 2, "conf": 0.7655163407325745, "frame_idx": 52, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1151, 569, 1291, 651], "id": 1, "cls": 2, "conf": 0.7518490552902222, "frame_idx": 53, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1149, 569, 1289, 652], "id": 1, "cls": 2, "conf": 0.7494193911552429, "frame_idx": 54, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1147, 570, 1289, 654], "id": 1, "cls": 2, "conf": 0.7891559600830078, "frame_idx": 55, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1145, 570, 1289, 655], "id": 1, "cls": 2, "conf": 0.7939369082450867, "frame_idx": 56, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1142, 569, 1289, 656], "id": 1, "cls": 2, "conf": 0.8129497170448303, "frame_idx": 57, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1141, 570, 1287, 656], "id": 1, "cls": 2, "conf": 0.8340080380439758, "frame_idx": 58, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1140, 569, 1288, 657], "id": 1, "cls": 2, "conf": 0.8393167853355408, "frame_idx": 59, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1141, 570, 1287, 657], "id": 1, "cls": 2, "conf": 0.8389145135879517, "frame_idx": 60, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1139, 569, 1285, 658], "id": 1, "cls": 2, "conf": 0.8342702388763428, "frame_idx": 61, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1138, 570, 1284, 658], "id": 1, "cls": 2, "conf": 0.8394166827201843, "frame_idx": 62, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1135, 569, 1284, 658], "id": 1, "cls": 2, "conf": 0.8471781611442566, "frame_idx": 63, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1131, 568, 1281, 659], "id": 1, "cls": 2, "conf": 0.8232806921005249, "frame_idx": 64, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1129, 568, 1279, 660], "id": 1, "cls": 2, "conf": 0.865515410900116, "frame_idx": 65, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1128, 569, 1282, 661], "id": 1, "cls": 2, "conf": 0.8378810882568359, "frame_idx": 66, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1126, 569, 1282, 661], "id": 1, "cls": 2, "conf": 0.8417340517044067, "frame_idx": 67, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1126, 569, 1281, 661], "id": 1, "cls": 2, "conf": 0.8533654808998108, "frame_idx": 68, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1125, 569, 1281, 660], "id": 1, "cls": 2, "conf": 0.8475178480148315, "frame_idx": 69, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1123, 569, 1280, 661], "id": 1, "cls": 2, "conf": 0.8625006675720215, "frame_idx": 70, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1120, 568, 1278, 662], "id": 1, "cls": 2, "conf": 0.8567495346069336, "frame_idx": 71, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1120, 569, 1276, 663], "id": 1, "cls": 2, "conf": 0.8443597555160522, "frame_idx": 72, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1118, 568, 1276, 663], "id": 1, "cls": 2, "conf": 0.8420413732528687, "frame_idx": 73, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1115, 567, 1276, 663], "id": 1, "cls": 2, "conf": 0.8549453020095825, "frame_idx": 74, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1115, 567, 1275, 664], "id": 1, "cls": 2, "conf": 0.8429552316665649, "frame_idx": 75, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1112, 567, 1273, 665], "id": 1, "cls": 2, "conf": 0.8485922813415527, "frame_idx": 76, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1111, 567, 1273, 666], "id": 1, "cls": 2, "conf": 0.8699796199798584, "frame_idx": 77, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1109, 565, 1273, 666], "id": 1, "cls": 2, "conf": 0.8823856115341187, "frame_idx": 78, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1107, 564, 1274, 667], "id": 1, "cls": 2, "conf": 0.8547831177711487, "frame_idx": 79, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1106, 565, 1271, 667], "id": 1, "cls": 2, "conf": 0.8556330800056458, "frame_idx": 80, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1105, 564, 1271, 667], "id": 1, "cls": 2, "conf": 0.8522816896438599, "frame_idx": 81, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1103, 562, 1271, 668], "id": 1, "cls": 2, "conf": 0.8402776718139648, "frame_idx": 82, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1098, 561, 1272, 669], "id": 1, "cls": 2, "conf": 0.849938154220581, "frame_idx": 83, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1095, 561, 1272, 669], "id": 1, "cls": 2, "conf": 0.8956634998321533, "frame_idx": 84, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1092, 561, 1272, 670], "id": 1, "cls": 2, "conf": 0.9015648365020752, "frame_idx": 85, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1093, 562, 1271, 670], "id": 1, "cls": 2, "conf": 0.8583961725234985, "frame_idx": 86, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1091, 562, 1271, 672], "id": 1, "cls": 2, "conf": 0.8442841172218323, "frame_idx": 87, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1089, 562, 1270, 672], "id": 1, "cls": 2, "conf": 0.8542094230651855, "frame_idx": 88, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1085, 560, 1267, 672], "id": 1, "cls": 2, "conf": 0.8753722310066223, "frame_idx": 89, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1081, 559, 1266, 673], "id": 1, "cls": 2, "conf": 0.8686020970344543, "frame_idx": 90, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1079, 558, 1266, 673], "id": 1, "cls": 2, "conf": 0.8676679134368896, "frame_idx": 91, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1075, 558, 1265, 674], "id": 1, "cls": 2, "conf": 0.8485567569732666, "frame_idx": 92, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1074, 558, 1264, 674], "id": 1, "cls": 2, "conf": 0.8431268334388733, "frame_idx": 93, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1074, 557, 1264, 674], "id": 1, "cls": 2, "conf": 0.8517748713493347, "frame_idx": 94, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1070, 559, 1262, 675], "id": 1, "cls": 2, "conf": 0.8630310297012329, "frame_idx": 95, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1068, 559, 1260, 676], "id": 1, "cls": 2, "conf": 0.8517524003982544, "frame_idx": 96, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1065, 557, 1260, 676], "id": 1, "cls": 2, "conf": 0.8309876918792725, "frame_idx": 97, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1062, 558, 1257, 676], "id": 1, "cls": 2, "conf": 0.820047914981842, "frame_idx": 98, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1058, 558, 1258, 680], "id": 1, "cls": 2, "conf": 0.8312326073646545, "frame_idx": 99, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1057, 557, 1255, 681], "id": 1, "cls": 2, "conf": 0.84773850440979, "frame_idx": 100, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1055, 558, 1253, 682], "id": 1, "cls": 2, "conf": 0.8278942108154297, "frame_idx": 101, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1052, 557, 1254, 682], "id": 1, "cls": 2, "conf": 0.8419964909553528, "frame_idx": 102, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1048, 554, 1253, 682], "id": 1, "cls": 2, "conf": 0.8698597550392151, "frame_idx": 103, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1045, 553, 1251, 683], "id": 1, "cls": 2, "conf": 0.8451534509658813, "frame_idx": 104, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1041, 553, 1250, 685], "id": 1, "cls": 2, "conf": 0.8478474617004395, "frame_idx": 105, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1037, 552, 1250, 685], "id": 1, "cls": 2, "conf": 0.8371977210044861, "frame_idx": 106, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1034, 552, 1249, 686], "id": 1, "cls": 2, "conf": 0.8587230443954468, "frame_idx": 107, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1032, 552, 1246, 687], "id": 1, "cls": 2, "conf": 0.8486429452896118, "frame_idx": 108, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1026, 552, 1246, 688], "id": 1, "cls": 2, "conf": 0.8577057123184204, "frame_idx": 109, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1024, 551, 1244, 687], "id": 1, "cls": 2, "conf": 0.847007155418396, "frame_idx": 110, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1020, 551, 1244, 689], "id": 1, "cls": 2, "conf": 0.8531818985939026, "frame_idx": 111, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1014, 550, 1245, 691], "id": 1, "cls": 2, "conf": 0.8777499794960022, "frame_idx": 112, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1011, 550, 1242, 692], "id": 1, "cls": 2, "conf": 0.8970717787742615, "frame_idx": 113, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1009, 550, 1241, 694], "id": 1, "cls": 2, "conf": 0.8887585401535034, "frame_idx": 114, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1007, 549, 1239, 695], "id": 1, "cls": 2, "conf": 0.8952226638793945, "frame_idx": 115, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1002, 549, 1240, 698], "id": 1, "cls": 2, "conf": 0.9019944667816162, "frame_idx": 116, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1000, 550, 1237, 699], "id": 1, "cls": 2, "conf": 0.8975278735160828, "frame_idx": 117, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [993, 549, 1237, 700], "id": 1, "cls": 2, "conf": 0.9004268646240234, "frame_idx": 118, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [988, 550, 1233, 701], "id": 1, "cls": 2, "conf": 0.8971960544586182, "frame_idx": 119, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [986, 549, 1231, 702], "id": 1, "cls": 2, "conf": 0.8989416360855103, "frame_idx": 120, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [980, 548, 1229, 704], "id": 1, "cls": 2, "conf": 0.889881432056427, "frame_idx": 121, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [975, 548, 1228, 708], "id": 1, "cls": 2, "conf": 0.8943332433700562, "frame_idx": 122, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [971, 548, 1228, 710], "id": 1, "cls": 2, "conf": 0.898472785949707, "frame_idx": 123, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [967, 547, 1226, 712], "id": 1, "cls": 2, "conf": 0.8931097388267517, "frame_idx": 124, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [963, 546, 1225, 713], "id": 1, "cls": 2, "conf": 0.8915606141090393, "frame_idx": 125, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [959, 546, 1223, 715], "id": 1, "cls": 2, "conf": 0.8841129541397095, "frame_idx": 126, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [955, 546, 1223, 717], "id": 1, "cls": 2, "conf": 0.850002646446228, "frame_idx": 127, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [950, 545, 1221, 718], "id": 1, "cls": 2, "conf": 0.8723787069320679, "frame_idx": 128, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [942, 544, 1220, 719], "id": 1, "cls": 2, "conf": 0.8795301914215088, "frame_idx": 129, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [940, 544, 1217, 720], "id": 1, "cls": 2, "conf": 0.8854840993881226, "frame_idx": 130, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [932, 543, 1217, 722], "id": 1, "cls": 2, "conf": 0.8812260031700134, "frame_idx": 131, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [927, 544, 1217, 725], "id": 1, "cls": 2, "conf": 0.8683909773826599, "frame_idx": 132, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [918, 543, 1216, 727], "id": 1, "cls": 2, "conf": 0.853493869304657, "frame_idx": 133, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [914, 543, 1214, 728], "id": 1, "cls": 2, "conf": 0.8531240224838257, "frame_idx": 134, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [908, 543, 1213, 730], "id": 1, "cls": 2, "conf": 0.8651628494262695, "frame_idx": 135, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [902, 542, 1209, 732], "id": 1, "cls": 2, "conf": 0.8718039989471436, "frame_idx": 136, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [894, 541, 1208, 735], "id": 1, "cls": 2, "conf": 0.848781943321228, "frame_idx": 137, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [888, 541, 1206, 736], "id": 1, "cls": 2, "conf": 0.8739963173866272, "frame_idx": 138, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [881, 541, 1204, 737], "id": 1, "cls": 2, "conf": 0.8722886443138123, "frame_idx": 139, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [872, 539, 1203, 738], "id": 1, "cls": 2, "conf": 0.8997212052345276, "frame_idx": 140, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [866, 539, 1200, 739], "id": 1, "cls": 2, "conf": 0.8821484446525574, "frame_idx": 141, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [860, 538, 1198, 744], "id": 1, "cls": 2, "conf": 0.8928354978561401, "frame_idx": 142, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [852, 536, 1197, 746], "id": 1, "cls": 2, "conf": 0.8943573832511902, "frame_idx": 143, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [843, 537, 1195, 748], "id": 1, "cls": 2, "conf": 0.8848525285720825, "frame_idx": 144, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [835, 536, 1194, 749], "id": 1, "cls": 2, "conf": 0.8749076724052429, "frame_idx": 145, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [826, 536, 1190, 751], "id": 1, "cls": 2, "conf": 0.8655844330787659, "frame_idx": 146, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [818, 538, 1186, 757], "id": 1, "cls": 2, "conf": 0.8978791236877441, "frame_idx": 147, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [810, 536, 1184, 759], "id": 1, "cls": 2, "conf": 0.9050822257995605, "frame_idx": 148, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [801, 533, 1181, 758], "id": 1, "cls": 2, "conf": 0.9211980104446411, "frame_idx": 149, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [791, 532, 1180, 762], "id": 1, "cls": 2, "conf": 0.9195648431777954, "frame_idx": 150, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [781, 530, 1177, 770], "id": 1, "cls": 2, "conf": 0.9223189353942871, "frame_idx": 151, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [769, 530, 1177, 772], "id": 1, "cls": 2, "conf": 0.9049766063690186, "frame_idx": 152, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [760, 528, 1175, 772], "id": 1, "cls": 2, "conf": 0.9004610776901245, "frame_idx": 153, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [749, 528, 1174, 776], "id": 1, "cls": 2, "conf": 0.9073677062988281, "frame_idx": 154, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [738, 526, 1171, 783], "id": 1, "cls": 2, "conf": 0.9120516777038574, "frame_idx": 155, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1254, 566, 1426, 643], "id": 2, "cls": 2, "conf": 0.702964186668396, "frame_idx": 155, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [725, 526, 1170, 785], "id": 1, "cls": 2, "conf": 0.9064223766326904, "frame_idx": 156, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1253, 568, 1422, 643], "id": 2, "cls": 2, "conf": 0.7038942575454712, "frame_idx": 156, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [712, 527, 1165, 789], "id": 1, "cls": 2, "conf": 0.9063256978988647, "frame_idx": 157, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1252, 568, 1421, 643], "id": 2, "cls": 2, "conf": 0.7038942575454712, "frame_idx": 157, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [699, 524, 1160, 793], "id": 1, "cls": 2, "conf": 0.8908406496047974, "frame_idx": 158, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [685, 524, 1159, 795], "id": 1, "cls": 2, "conf": 0.8844937682151794, "frame_idx": 159, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [673, 525, 1156, 799], "id": 1, "cls": 2, "conf": 0.8897193670272827, "frame_idx": 160, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [659, 524, 1152, 802], "id": 1, "cls": 2, "conf": 0.905559241771698, "frame_idx": 161, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [644, 522, 1149, 809], "id": 1, "cls": 2, "conf": 0.89296555519104, "frame_idx": 162, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [628, 522, 1146, 820], "id": 1, "cls": 2, "conf": 0.8848194479942322, "frame_idx": 163, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1242, 567, 1420, 642], "id": 2, "cls": 2, "conf": 0.717244029045105, "frame_idx": 163, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [611, 519, 1145, 821], "id": 1, "cls": 2, "conf": 0.9121138453483582, "frame_idx": 164, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1242, 568, 1418, 643], "id": 2, "cls": 2, "conf": 0.733672559261322, "frame_idx": 164, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [594, 520, 1141, 827], "id": 1, "cls": 2, "conf": 0.890241801738739, "frame_idx": 165, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1245, 569, 1416, 642], "id": 2, "cls": 2, "conf": 0.7150111794471741, "frame_idx": 165, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [574, 519, 1136, 832], "id": 1, "cls": 2, "conf": 0.9198168516159058, "frame_idx": 166, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1244, 569, 1415, 642], "id": 2, "cls": 2, "conf": 0.7150111794471741, "frame_idx": 166, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [555, 518, 1133, 839], "id": 1, "cls": 2, "conf": 0.9146777987480164, "frame_idx": 167, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [537, 515, 1129, 845], "id": 1, "cls": 2, "conf": 0.9021809101104736, "frame_idx": 168, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [516, 513, 1127, 854], "id": 1, "cls": 2, "conf": 0.9111503958702087, "frame_idx": 169, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [495, 510, 1126, 863], "id": 1, "cls": 2, "conf": 0.9124228954315186, "frame_idx": 170, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [471, 512, 1121, 872], "id": 1, "cls": 2, "conf": 0.9291900396347046, "frame_idx": 171, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [447, 509, 1116, 875], "id": 1, "cls": 2, "conf": 0.8657183051109314, "frame_idx": 172, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [423, 506, 1111, 881], "id": 1, "cls": 2, "conf": 0.8687337636947632, "frame_idx": 173, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [393, 505, 1105, 893], "id": 1, "cls": 2, "conf": 0.9182578921318054, "frame_idx": 174, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [368, 503, 1101, 899], "id": 1, "cls": 2, "conf": 0.9256529808044434, "frame_idx": 175, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [340, 502, 1096, 912], "id": 1, "cls": 2, "conf": 0.9282132983207703, "frame_idx": 176, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [303, 500, 1091, 924], "id": 1, "cls": 2, "conf": 0.9329380989074707, "frame_idx": 177, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [274, 499, 1087, 937], "id": 1, "cls": 2, "conf": 0.9455896019935608, "frame_idx": 178, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [233, 498, 1083, 946], "id": 1, "cls": 2, "conf": 0.9385244846343994, "frame_idx": 179, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [194, 496, 1077, 960], "id": 1, "cls": 2, "conf": 0.9393031001091003, "frame_idx": 180, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [153, 495, 1076, 972], "id": 1, "cls": 2, "conf": 0.9307792782783508, "frame_idx": 181, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [110, 492, 1067, 988], "id": 1, "cls": 2, "conf": 0.9395390748977661, "frame_idx": 182, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [57, 493, 1060, 1008], "id": 1, "cls": 2, "conf": 0.9405025243759155, "frame_idx": 183, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [4, 492, 1053, 1029], "id": 1, "cls": 2, "conf": 0.9425285458564758, "frame_idx": 184, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 490, 1047, 1043], "id": 1, "cls": 2, "conf": 0.9343565106391907, "frame_idx": 185, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 488, 1043, 1061], "id": 1, "cls": 2, "conf": 0.9273869395256042, "frame_idx": 186, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 484, 1035, 1071], "id": 1, "cls": 2, "conf": 0.9321094751358032, "frame_idx": 187, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 475, 1030, 1071], "id": 1, "cls": 2, "conf": 0.9317752122879028, "frame_idx": 188, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 473, 1025, 1073], "id": 1, "cls": 2, "conf": 0.9486481547355652, "frame_idx": 189, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1210, 567, 1396, 640], "id": 2, "cls": 2, "conf": 0.7311104536056519, "frame_idx": 189, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 472, 1016, 1073], "id": 1, "cls": 2, "conf": 0.952238917350769, "frame_idx": 190, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1211, 569, 1397, 642], "id": 2, "cls": 2, "conf": 0.7499367594718933, "frame_idx": 190, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 463, 1008, 1070], "id": 1, "cls": 2, "conf": 0.9457194209098816, "frame_idx": 191, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1219, 570, 1396, 641], "id": 2, "cls": 2, "conf": 0.7276124954223633, "frame_idx": 191, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 454, 1001, 1071], "id": 1, "cls": 2, "conf": 0.9511743187904358, "frame_idx": 192, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1218, 570, 1396, 641], "id": 2, "cls": 2, "conf": 0.7206576466560364, "frame_idx": 192, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 450, 994, 1069], "id": 1, "cls": 2, "conf": 0.9420279264450073, "frame_idx": 193, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1214, 570, 1395, 642], "id": 2, "cls": 2, "conf": 0.7134021520614624, "frame_idx": 193, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 446, 985, 1067], "id": 1, "cls": 2, "conf": 0.9500812292098999, "frame_idx": 194, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1215, 570, 1393, 642], "id": 2, "cls": 2, "conf": 0.7069892287254333, "frame_idx": 194, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 442, 976, 1066], "id": 1, "cls": 2, "conf": 0.9406448006629944, "frame_idx": 195, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1203, 568, 1391, 642], "id": 2, "cls": 2, "conf": 0.7376792430877686, "frame_idx": 195, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 441, 968, 1069], "id": 1, "cls": 2, "conf": 0.9537635445594788, "frame_idx": 196, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1202, 567, 1391, 642], "id": 2, "cls": 2, "conf": 0.7550773024559021, "frame_idx": 196, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 440, 960, 1069], "id": 1, "cls": 2, "conf": 0.9586692452430725, "frame_idx": 197, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1200, 566, 1392, 642], "id": 2, "cls": 2, "conf": 0.7765669822692871, "frame_idx": 197, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 431, 950, 1069], "id": 1, "cls": 2, "conf": 0.9550426006317139, "frame_idx": 198, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1198, 565, 1393, 643], "id": 2, "cls": 2, "conf": 0.7722377777099609, "frame_idx": 198, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 424, 938, 1065], "id": 1, "cls": 2, "conf": 0.9508339762687683, "frame_idx": 199, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1196, 565, 1392, 643], "id": 2, "cls": 2, "conf": 0.751980185508728, "frame_idx": 199, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 419, 927, 1065], "id": 1, "cls": 2, "conf": 0.9454301595687866, "frame_idx": 200, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1196, 566, 1392, 643], "id": 2, "cls": 2, "conf": 0.7461082935333252, "frame_idx": 200, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 413, 916, 1065], "id": 1, "cls": 2, "conf": 0.957693874835968, "frame_idx": 201, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1194, 565, 1392, 644], "id": 2, "cls": 2, "conf": 0.7643528580665588, "frame_idx": 201, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 407, 905, 1065], "id": 1, "cls": 2, "conf": 0.945280134677887, "frame_idx": 202, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1196, 565, 1392, 644], "id": 2, "cls": 2, "conf": 0.7613423466682434, "frame_idx": 202, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 409, 890, 1065], "id": 1, "cls": 2, "conf": 0.9535142183303833, "frame_idx": 203, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1194, 565, 1391, 644], "id": 2, "cls": 2, "conf": 0.7633638978004456, "frame_idx": 203, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 400, 875, 1065], "id": 1, "cls": 2, "conf": 0.9448526501655579, "frame_idx": 204, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1192, 565, 1391, 644], "id": 2, "cls": 2, "conf": 0.7550344467163086, "frame_idx": 204, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 395, 863, 1064], "id": 1, "cls": 2, "conf": 0.9526091814041138, "frame_idx": 205, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1192, 565, 1390, 644], "id": 2, "cls": 2, "conf": 0.7387273907661438, "frame_idx": 205, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 391, 851, 1062], "id": 1, "cls": 2, "conf": 0.9561181664466858, "frame_idx": 206, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1191, 565, 1390, 644], "id": 2, "cls": 2, "conf": 0.7227319478988647, "frame_idx": 206, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 385, 830, 1059], "id": 1, "cls": 2, "conf": 0.9433083534240723, "frame_idx": 207, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1189, 565, 1388, 644], "id": 2, "cls": 2, "conf": 0.703997015953064, "frame_idx": 207, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 369, 812, 1064], "id": 1, "cls": 2, "conf": 0.9332630634307861, "frame_idx": 208, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1192, 566, 1387, 644], "id": 2, "cls": 2, "conf": 0.7098210453987122, "frame_idx": 208, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 364, 792, 1067], "id": 1, "cls": 2, "conf": 0.945813775062561, "frame_idx": 209, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1189, 565, 1388, 644], "id": 2, "cls": 2, "conf": 0.7005091905593872, "frame_idx": 209, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 354, 774, 1068], "id": 1, "cls": 2, "conf": 0.9388237595558167, "frame_idx": 210, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1187, 565, 1385, 643], "id": 2, "cls": 2, "conf": 0.7079640030860901, "frame_idx": 210, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 351, 755, 1070], "id": 1, "cls": 2, "conf": 0.9397347569465637, "frame_idx": 211, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1185, 564, 1385, 644], "id": 2, "cls": 2, "conf": 0.7079640030860901, "frame_idx": 211, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 350, 729, 1068], "id": 1, "cls": 2, "conf": 0.949310839176178, "frame_idx": 212, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1183, 564, 1381, 643], "id": 2, "cls": 2, "conf": 0.7306272983551025, "frame_idx": 212, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 350, 703, 1068], "id": 1, "cls": 2, "conf": 0.9424352645874023, "frame_idx": 213, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1183, 564, 1383, 643], "id": 2, "cls": 2, "conf": 0.7504119873046875, "frame_idx": 213, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 350, 679, 1066], "id": 1, "cls": 2, "conf": 0.9429755806922913, "frame_idx": 214, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1181, 565, 1377, 644], "id": 2, "cls": 2, "conf": 0.7851810455322266, "frame_idx": 214, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 354, 650, 1069], "id": 1, "cls": 2, "conf": 0.9048929214477539, "frame_idx": 215, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1181, 565, 1378, 643], "id": 2, "cls": 2, "conf": 0.7938785552978516, "frame_idx": 215, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 378, 620, 1070], "id": 1, "cls": 2, "conf": 0.9180529713630676, "frame_idx": 216, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1182, 566, 1376, 643], "id": 2, "cls": 2, "conf": 0.7817256450653076, "frame_idx": 216, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 395, 588, 1069], "id": 1, "cls": 2, "conf": 0.9412034749984741, "frame_idx": 217, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1182, 565, 1374, 644], "id": 2, "cls": 2, "conf": 0.8047704100608826, "frame_idx": 217, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 427, 551, 1071], "id": 1, "cls": 2, "conf": 0.9319164752960205, "frame_idx": 218, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1182, 565, 1375, 643], "id": 2, "cls": 2, "conf": 0.7836374640464783, "frame_idx": 218, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 453, 510, 1072], "id": 1, "cls": 2, "conf": 0.9232752919197083, "frame_idx": 219, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1181, 566, 1371, 642], "id": 2, "cls": 2, "conf": 0.8103419542312622, "frame_idx": 219, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 503, 467, 1071], "id": 1, "cls": 2, "conf": 0.904760479927063, "frame_idx": 220, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1179, 566, 1371, 642], "id": 2, "cls": 2, "conf": 0.8125634789466858, "frame_idx": 220, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 549, 418, 1070], "id": 1, "cls": 2, "conf": 0.9279927611351013, "frame_idx": 221, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1179, 566, 1376, 642], "id": 2, "cls": 2, "conf": 0.8272838592529297, "frame_idx": 221, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 583, 363, 1068], "id": 1, "cls": 2, "conf": 0.9242643117904663, "frame_idx": 222, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1178, 565, 1374, 642], "id": 2, "cls": 2, "conf": 0.8221709132194519, "frame_idx": 222, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 593, 303, 1068], "id": 1, "cls": 2, "conf": 0.9143214821815491, "frame_idx": 223, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1177, 565, 1375, 644], "id": 2, "cls": 2, "conf": 0.8016420602798462, "frame_idx": 223, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1, 600, 238, 1069], "id": 1, "cls": 2, "conf": 0.8708683252334595, "frame_idx": 224, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1177, 565, 1376, 644], "id": 2, "cls": 2, "conf": 0.7917031645774841, "frame_idx": 224, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [0, 616, 197, 1069], "id": 1, "cls": 2, "conf": 0.8708683252334595, "frame_idx": 225, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1178, 565, 1376, 643], "id": 2, "cls": 2, "conf": 0.78056401014328, "frame_idx": 225, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1177, 564, 1377, 644], "id": 2, "cls": 2, "conf": 0.7785735130310059, "frame_idx": 226, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1176, 565, 1370, 644], "id": 2, "cls": 2, "conf": 0.7929512858390808, "frame_idx": 227, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1174, 564, 1371, 645], "id": 2, "cls": 2, "conf": 0.8178865909576416, "frame_idx": 228, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1173, 564, 1371, 645], "id": 2, "cls": 2, "conf": 0.8109760284423828, "frame_idx": 229, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1173, 565, 1370, 645], "id": 2, "cls": 2, "conf": 0.7563623189926147, "frame_idx": 230, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1174, 565, 1370, 645], "id": 2, "cls": 2, "conf": 0.7083349227905273, "frame_idx": 231, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1173, 565, 1368, 645], "id": 2, "cls": 2, "conf": 0.7430815100669861, "frame_idx": 232, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1172, 564, 1359, 643], "id": 2, "cls": 2, "conf": 0.7816348075866699, "frame_idx": 233, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1171, 565, 1356, 642], "id": 2, "cls": 2, "conf": 0.8003019094467163, "frame_idx": 234, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1171, 563, 1360, 644], "id": 2, "cls": 2, "conf": 0.8223402500152588, "frame_idx": 235, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1169, 562, 1362, 645], "id": 2, "cls": 2, "conf": 0.8306653499603271, "frame_idx": 236, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1168, 562, 1359, 645], "id": 2, "cls": 2, "conf": 0.8245570659637451, "frame_idx": 237, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1170, 563, 1359, 645], "id": 2, "cls": 2, "conf": 0.818155825138092, "frame_idx": 238, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1168, 563, 1360, 645], "id": 2, "cls": 2, "conf": 0.8151793479919434, "frame_idx": 239, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1166, 564, 1357, 645], "id": 2, "cls": 2, "conf": 0.8082919120788574, "frame_idx": 240, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1165, 564, 1356, 645], "id": 2, "cls": 2, "conf": 0.8219642043113708, "frame_idx": 241, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1164, 564, 1353, 645], "id": 2, "cls": 2, "conf": 0.7999997138977051, "frame_idx": 242, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1164, 564, 1352, 645], "id": 2, "cls": 2, "conf": 0.7364180088043213, "frame_idx": 243, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1165, 565, 1349, 645], "id": 2, "cls": 2, "conf": 0.7858971357345581, "frame_idx": 244, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1164, 564, 1354, 646], "id": 2, "cls": 2, "conf": 0.7886779308319092, "frame_idx": 245, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1164, 564, 1348, 646], "id": 2, "cls": 2, "conf": 0.818172812461853, "frame_idx": 246, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1163, 564, 1348, 646], "id": 2, "cls": 2, "conf": 0.8523472547531128, "frame_idx": 247, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1164, 564, 1348, 645], "id": 2, "cls": 2, "conf": 0.8364881873130798, "frame_idx": 248, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1163, 563, 1346, 646], "id": 2, "cls": 2, "conf": 0.8150932788848877, "frame_idx": 249, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1163, 564, 1346, 646], "id": 2, "cls": 2, "conf": 0.8284506797790527, "frame_idx": 250, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1163, 563, 1347, 645], "id": 2, "cls": 2, "conf": 0.8243890404701233, "frame_idx": 251, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1163, 564, 1344, 646], "id": 2, "cls": 2, "conf": 0.848281741142273, "frame_idx": 252, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1163, 563, 1341, 646], "id": 2, "cls": 2, "conf": 0.8477445840835571, "frame_idx": 253, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1162, 563, 1339, 648], "id": 2, "cls": 2, "conf": 0.8400436043739319, "frame_idx": 254, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1161, 561, 1336, 647], "id": 2, "cls": 2, "conf": 0.7861170768737793, "frame_idx": 255, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1161, 562, 1338, 649], "id": 2, "cls": 2, "conf": 0.8120461702346802, "frame_idx": 256, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1161, 562, 1336, 648], "id": 2, "cls": 2, "conf": 0.7770818471908569, "frame_idx": 257, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1161, 561, 1332, 648], "id": 2, "cls": 2, "conf": 0.7602912187576294, "frame_idx": 258, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 560, 1331, 649], "id": 2, "cls": 2, "conf": 0.7476798295974731, "frame_idx": 259, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 560, 1330, 649], "id": 2, "cls": 2, "conf": 0.7798804640769958, "frame_idx": 260, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 560, 1328, 649], "id": 2, "cls": 2, "conf": 0.7794782519340515, "frame_idx": 261, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 561, 1328, 649], "id": 2, "cls": 2, "conf": 0.7535544037818909, "frame_idx": 262, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 561, 1326, 649], "id": 2, "cls": 2, "conf": 0.7481237649917603, "frame_idx": 263, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 561, 1325, 647], "id": 2, "cls": 2, "conf": 0.7650920152664185, "frame_idx": 264, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1158, 562, 1324, 647], "id": 2, "cls": 2, "conf": 0.8215755224227905, "frame_idx": 265, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 561, 1324, 647], "id": 2, "cls": 2, "conf": 0.8252439498901367, "frame_idx": 266, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1158, 561, 1323, 648], "id": 2, "cls": 2, "conf": 0.8128286004066467, "frame_idx": 267, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1157, 560, 1323, 649], "id": 2, "cls": 2, "conf": 0.8222718238830566, "frame_idx": 268, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1158, 560, 1323, 649], "id": 2, "cls": 2, "conf": 0.8110289573669434, "frame_idx": 269, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 560, 1323, 649], "id": 2, "cls": 2, "conf": 0.8318296074867249, "frame_idx": 270, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 561, 1321, 649], "id": 2, "cls": 2, "conf": 0.8325403332710266, "frame_idx": 271, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 560, 1323, 650], "id": 2, "cls": 2, "conf": 0.8335207104682922, "frame_idx": 272, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 560, 1321, 650], "id": 2, "cls": 2, "conf": 0.8333126902580261, "frame_idx": 273, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1158, 561, 1320, 650], "id": 2, "cls": 2, "conf": 0.8144757151603699, "frame_idx": 274, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 561, 1319, 650], "id": 2, "cls": 2, "conf": 0.809233546257019, "frame_idx": 275, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1160, 561, 1317, 650], "id": 2, "cls": 2, "conf": 0.7907527685165405, "frame_idx": 276, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 560, 1318, 650], "id": 2, "cls": 2, "conf": 0.8115890026092529, "frame_idx": 277, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1158, 559, 1317, 651], "id": 2, "cls": 2, "conf": 0.7833464741706848, "frame_idx": 278, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1158, 559, 1317, 651], "id": 2, "cls": 2, "conf": 0.7954601645469666, "frame_idx": 279, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 559, 1317, 651], "id": 2, "cls": 2, "conf": 0.774968683719635, "frame_idx": 280, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 559, 1316, 651], "id": 2, "cls": 2, "conf": 0.7699628472328186, "frame_idx": 281, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1158, 559, 1316, 651], "id": 2, "cls": 2, "conf": 0.7739447951316833, "frame_idx": 282, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1159, 559, 1315, 650], "id": 2, "cls": 2, "conf": 0.803051769733429, "frame_idx": 283, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1158, 558, 1312, 652], "id": 2, "cls": 2, "conf": 0.810187041759491, "frame_idx": 284, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1157, 557, 1311, 653], "id": 2, "cls": 2, "conf": 0.8035591840744019, "frame_idx": 285, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1157, 558, 1311, 653], "id": 2, "cls": 2, "conf": 0.8188391923904419, "frame_idx": 286, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1156, 558, 1311, 653], "id": 2, "cls": 2, "conf": 0.8180844187736511, "frame_idx": 287, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1157, 559, 1310, 653], "id": 2, "cls": 2, "conf": 0.8250501155853271, "frame_idx": 288, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1156, 559, 1309, 654], "id": 2, "cls": 2, "conf": 0.8236573338508606, "frame_idx": 289, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1156, 559, 1308, 654], "id": 2, "cls": 2, "conf": 0.8105210661888123, "frame_idx": 290, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1157, 560, 1307, 654], "id": 2, "cls": 2, "conf": 0.8106025457382202, "frame_idx": 291, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1155, 560, 1307, 655], "id": 2, "cls": 2, "conf": 0.788083016872406, "frame_idx": 292, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1157, 560, 1305, 654], "id": 2, "cls": 2, "conf": 0.7796603441238403, "frame_idx": 293, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1157, 560, 1304, 655], "id": 2, "cls": 2, "conf": 0.7901594638824463, "frame_idx": 294, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1155, 560, 1305, 656], "id": 2, "cls": 2, "conf": 0.7907295823097229, "frame_idx": 295, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1156, 560, 1303, 655], "id": 2, "cls": 2, "conf": 0.7933876514434814, "frame_idx": 296, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1157, 559, 1301, 655], "id": 2, "cls": 2, "conf": 0.7832263708114624, "frame_idx": 297, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1156, 559, 1301, 656], "id": 2, "cls": 2, "conf": 0.795276403427124, "frame_idx": 298, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1155, 559, 1301, 656], "id": 2, "cls": 2, "conf": 0.8082300424575806, "frame_idx": 299, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1155, 560, 1299, 656], "id": 2, "cls": 2, "conf": 0.7965103387832642, "frame_idx": 300, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1154, 560, 1300, 657], "id": 2, "cls": 2, "conf": 0.8124801516532898, "frame_idx": 301, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1153, 560, 1300, 657], "id": 2, "cls": 2, "conf": 0.8144661784172058, "frame_idx": 302, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1153, 561, 1299, 658], "id": 2, "cls": 2, "conf": 0.8181474208831787, "frame_idx": 303, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1152, 561, 1298, 658], "id": 2, "cls": 2, "conf": 0.8187706470489502, "frame_idx": 304, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1154, 560, 1298, 656], "id": 2, "cls": 2, "conf": 0.8268204927444458, "frame_idx": 305, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1155, 560, 1297, 655], "id": 2, "cls": 2, "conf": 0.8292365074157715, "frame_idx": 306, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1155, 560, 1295, 656], "id": 2, "cls": 2, "conf": 0.8298918008804321, "frame_idx": 307, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1154, 559, 1297, 657], "id": 2, "cls": 2, "conf": 0.8282919526100159, "frame_idx": 308, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1155, 559, 1298, 657], "id": 2, "cls": 2, "conf": 0.8358256816864014, "frame_idx": 309, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1154, 559, 1297, 657], "id": 2, "cls": 2, "conf": 0.8314154744148254, "frame_idx": 310, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1154, 559, 1297, 657], "id": 2, "cls": 2, "conf": 0.8324777483940125, "frame_idx": 311, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1154, 560, 1294, 657], "id": 2, "cls": 2, "conf": 0.8399393558502197, "frame_idx": 312, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1153, 559, 1295, 658], "id": 2, "cls": 2, "conf": 0.8377672433853149, "frame_idx": 313, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1152, 559, 1294, 658], "id": 2, "cls": 2, "conf": 0.8295931816101074, "frame_idx": 314, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1151, 559, 1293, 658], "id": 2, "cls": 2, "conf": 0.8257358074188232, "frame_idx": 315, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1151, 559, 1292, 658], "id": 2, "cls": 2, "conf": 0.8370307087898254, "frame_idx": 316, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1151, 560, 1291, 658], "id": 2, "cls": 2, "conf": 0.818547785282135, "frame_idx": 317, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1150, 559, 1292, 659], "id": 2, "cls": 2, "conf": 0.7911444306373596, "frame_idx": 318, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1150, 559, 1292, 659], "id": 2, "cls": 2, "conf": 0.7788093686103821, "frame_idx": 319, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1150, 559, 1293, 659], "id": 2, "cls": 2, "conf": 0.7597206830978394, "frame_idx": 320, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1150, 560, 1291, 659], "id": 2, "cls": 2, "conf": 0.7717625498771667, "frame_idx": 321, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1148, 559, 1291, 660], "id": 2, "cls": 2, "conf": 0.7833176255226135, "frame_idx": 322, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1148, 559, 1292, 660], "id": 2, "cls": 2, "conf": 0.7886781096458435, "frame_idx": 323, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1148, 559, 1292, 660], "id": 2, "cls": 2, "conf": 0.7795507311820984, "frame_idx": 324, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1149, 560, 1291, 660], "id": 2, "cls": 2, "conf": 0.7811378240585327, "frame_idx": 325, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1148, 560, 1291, 661], "id": 2, "cls": 2, "conf": 0.7874495387077332, "frame_idx": 326, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1149, 560, 1290, 662], "id": 2, "cls": 2, "conf": 0.8070158958435059, "frame_idx": 327, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1147, 560, 1291, 664], "id": 2, "cls": 2, "conf": 0.8095881342887878, "frame_idx": 328, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1146, 560, 1290, 663], "id": 2, "cls": 2, "conf": 0.8032857775688171, "frame_idx": 329, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1145, 560, 1290, 664], "id": 2, "cls": 2, "conf": 0.826309084892273, "frame_idx": 330, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1145, 560, 1291, 665], "id": 2, "cls": 2, "conf": 0.799944281578064, "frame_idx": 331, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1145, 561, 1290, 665], "id": 2, "cls": 2, "conf": 0.7787960767745972, "frame_idx": 332, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1144, 560, 1290, 665], "id": 2, "cls": 2, "conf": 0.7718071937561035, "frame_idx": 333, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1142, 559, 1291, 666], "id": 2, "cls": 2, "conf": 0.7858945727348328, "frame_idx": 334, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1143, 559, 1290, 665], "id": 2, "cls": 2, "conf": 0.809407114982605, "frame_idx": 335, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1142, 559, 1290, 666], "id": 2, "cls": 2, "conf": 0.8050354719161987, "frame_idx": 336, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1141, 559, 1289, 666], "id": 2, "cls": 2, "conf": 0.8001269102096558, "frame_idx": 337, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1140, 558, 1289, 667], "id": 2, "cls": 2, "conf": 0.8002896308898926, "frame_idx": 338, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1140, 559, 1288, 667], "id": 2, "cls": 2, "conf": 0.8237987160682678, "frame_idx": 339, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1139, 558, 1289, 667], "id": 2, "cls": 2, "conf": 0.8150033950805664, "frame_idx": 340, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1136, 558, 1291, 667], "id": 2, "cls": 2, "conf": 0.7948818802833557, "frame_idx": 341, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1138, 559, 1289, 668], "id": 2, "cls": 2, "conf": 0.8127124905586243, "frame_idx": 342, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1136, 558, 1290, 668], "id": 2, "cls": 2, "conf": 0.8126155138015747, "frame_idx": 343, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1135, 558, 1290, 668], "id": 2, "cls": 2, "conf": 0.8102937936782837, "frame_idx": 344, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1136, 558, 1290, 668], "id": 2, "cls": 2, "conf": 0.7925915718078613, "frame_idx": 345, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1138, 559, 1288, 669], "id": 2, "cls": 2, "conf": 0.7755674123764038, "frame_idx": 346, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1136, 558, 1288, 670], "id": 2, "cls": 2, "conf": 0.7737069129943848, "frame_idx": 347, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1136, 558, 1286, 669], "id": 2, "cls": 2, "conf": 0.7875550389289856, "frame_idx": 348, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1131, 557, 1286, 670], "id": 2, "cls": 2, "conf": 0.7827519178390503, "frame_idx": 349, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1131, 556, 1286, 670], "id": 2, "cls": 2, "conf": 0.7984418272972107, "frame_idx": 350, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1130, 555, 1287, 671], "id": 2, "cls": 2, "conf": 0.7734009027481079, "frame_idx": 351, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1130, 556, 1285, 671], "id": 2, "cls": 2, "conf": 0.7766426205635071, "frame_idx": 352, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1128, 555, 1286, 672], "id": 2, "cls": 2, "conf": 0.7817273139953613, "frame_idx": 353, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1131, 555, 1284, 671], "id": 2, "cls": 2, "conf": 0.7750544548034668, "frame_idx": 354, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1128, 554, 1287, 672], "id": 2, "cls": 2, "conf": 0.7669058442115784, "frame_idx": 355, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1130, 555, 1284, 672], "id": 2, "cls": 2, "conf": 0.7651919722557068, "frame_idx": 356, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1128, 554, 1283, 672], "id": 2, "cls": 2, "conf": 0.7686755061149597, "frame_idx": 357, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1126, 553, 1284, 673], "id": 2, "cls": 2, "conf": 0.7569704055786133, "frame_idx": 358, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1126, 554, 1283, 673], "id": 2, "cls": 2, "conf": 0.788491427898407, "frame_idx": 359, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1123, 553, 1285, 673], "id": 2, "cls": 2, "conf": 0.796739935874939, "frame_idx": 360, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1124, 553, 1284, 674], "id": 2, "cls": 2, "conf": 0.7600229382514954, "frame_idx": 361, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1122, 552, 1285, 675], "id": 2, "cls": 2, "conf": 0.7608688473701477, "frame_idx": 362, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1121, 553, 1285, 676], "id": 2, "cls": 2, "conf": 0.7610014081001282, "frame_idx": 363, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1120, 552, 1285, 675], "id": 2, "cls": 2, "conf": 0.7238069772720337, "frame_idx": 364, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1119, 553, 1284, 675], "id": 2, "cls": 2, "conf": 0.789625883102417, "frame_idx": 365, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1118, 552, 1283, 675], "id": 2, "cls": 2, "conf": 0.7700904607772827, "frame_idx": 366, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1117, 552, 1282, 677], "id": 2, "cls": 2, "conf": 0.7024756669998169, "frame_idx": 367, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1116, 550, 1282, 677], "id": 2, "cls": 2, "conf": 0.7285512685775757, "frame_idx": 368, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1115, 549, 1281, 675], "id": 2, "cls": 2, "conf": 0.7092558145523071, "frame_idx": 369, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1113, 549, 1282, 675], "id": 2, "cls": 2, "conf": 0.7147558331489563, "frame_idx": 370, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1114, 548, 1280, 675], "id": 2, "cls": 2, "conf": 0.7318784594535828, "frame_idx": 371, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1112, 549, 1279, 676], "id": 2, "cls": 2, "conf": 0.7841340899467468, "frame_idx": 372, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1113, 549, 1278, 675], "id": 2, "cls": 2, "conf": 0.7626461386680603, "frame_idx": 373, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1111, 550, 1278, 677], "id": 2, "cls": 2, "conf": 0.7657148241996765, "frame_idx": 374, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1108, 550, 1280, 677], "id": 2, "cls": 2, "conf": 0.7782973647117615, "frame_idx": 375, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1108, 550, 1280, 677], "id": 2, "cls": 2, "conf": 0.7754068970680237, "frame_idx": 376, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1107, 551, 1279, 677], "id": 2, "cls": 2, "conf": 0.7901440858840942, "frame_idx": 377, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1105, 550, 1280, 678], "id": 2, "cls": 2, "conf": 0.811150848865509, "frame_idx": 378, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1105, 550, 1279, 678], "id": 2, "cls": 2, "conf": 0.7904564142227173, "frame_idx": 379, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1105, 550, 1278, 678], "id": 2, "cls": 2, "conf": 0.7392836809158325, "frame_idx": 380, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1104, 548, 1279, 678], "id": 2, "cls": 2, "conf": 0.7411684989929199, "frame_idx": 381, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1104, 551, 1277, 680], "id": 2, "cls": 2, "conf": 0.7404786944389343, "frame_idx": 382, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1102, 550, 1276, 680], "id": 2, "cls": 2, "conf": 0.7326121926307678, "frame_idx": 383, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1102, 550, 1277, 681], "id": 2, "cls": 2, "conf": 0.7641636729240417, "frame_idx": 384, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1101, 549, 1276, 681], "id": 2, "cls": 2, "conf": 0.7742770314216614, "frame_idx": 385, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1099, 549, 1276, 682], "id": 2, "cls": 2, "conf": 0.7556547522544861, "frame_idx": 386, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1098, 548, 1277, 682], "id": 2, "cls": 2, "conf": 0.702316164970398, "frame_idx": 387, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1096, 548, 1275, 683], "id": 2, "cls": 2, "conf": 0.7168530225753784, "frame_idx": 388, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1093, 547, 1273, 684], "id": 2, "cls": 2, "conf": 0.7561923265457153, "frame_idx": 389, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1093, 548, 1275, 684], "id": 2, "cls": 2, "conf": 0.7371773719787598, "frame_idx": 390, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1093, 549, 1275, 684], "id": 2, "cls": 2, "conf": 0.7662423849105835, "frame_idx": 391, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1090, 548, 1276, 685], "id": 2, "cls": 2, "conf": 0.7733460664749146, "frame_idx": 392, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1090, 548, 1275, 684], "id": 2, "cls": 2, "conf": 0.8063229918479919, "frame_idx": 393, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1088, 547, 1275, 685], "id": 2, "cls": 2, "conf": 0.834899365901947, "frame_idx": 394, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1085, 546, 1275, 686], "id": 2, "cls": 2, "conf": 0.8267676830291748, "frame_idx": 395, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1083, 546, 1274, 686], "id": 2, "cls": 2, "conf": 0.8470121622085571, "frame_idx": 396, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1082, 546, 1272, 685], "id": 2, "cls": 2, "conf": 0.8356623649597168, "frame_idx": 397, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1081, 546, 1271, 686], "id": 2, "cls": 2, "conf": 0.8369763493537903, "frame_idx": 398, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1080, 545, 1272, 686], "id": 2, "cls": 2, "conf": 0.8737363219261169, "frame_idx": 399, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1080, 544, 1271, 687], "id": 2, "cls": 2, "conf": 0.8609719276428223, "frame_idx": 400, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1078, 544, 1272, 689], "id": 2, "cls": 2, "conf": 0.83541339635849, "frame_idx": 401, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1078, 545, 1270, 689], "id": 2, "cls": 2, "conf": 0.8013574481010437, "frame_idx": 402, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1075, 544, 1271, 689], "id": 2, "cls": 2, "conf": 0.7798829078674316, "frame_idx": 403, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1074, 543, 1270, 691], "id": 2, "cls": 2, "conf": 0.8236221671104431, "frame_idx": 404, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1070, 543, 1270, 692], "id": 2, "cls": 2, "conf": 0.8620288372039795, "frame_idx": 405, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1070, 543, 1268, 692], "id": 2, "cls": 2, "conf": 0.8752257227897644, "frame_idx": 406, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1067, 542, 1268, 693], "id": 2, "cls": 2, "conf": 0.870403528213501, "frame_idx": 407, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1066, 542, 1269, 695], "id": 2, "cls": 2, "conf": 0.8699027299880981, "frame_idx": 408, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1062, 541, 1270, 696], "id": 2, "cls": 2, "conf": 0.8874167799949646, "frame_idx": 409, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1061, 541, 1269, 696], "id": 2, "cls": 2, "conf": 0.8754041194915771, "frame_idx": 410, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1060, 540, 1269, 698], "id": 2, "cls": 2, "conf": 0.8649414777755737, "frame_idx": 411, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1057, 539, 1268, 699], "id": 2, "cls": 2, "conf": 0.8912915587425232, "frame_idx": 412, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1056, 539, 1268, 700], "id": 2, "cls": 2, "conf": 0.8944886922836304, "frame_idx": 413, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1055, 539, 1269, 700], "id": 2, "cls": 2, "conf": 0.8907544612884521, "frame_idx": 414, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1054, 540, 1268, 701], "id": 2, "cls": 2, "conf": 0.8559849262237549, "frame_idx": 415, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1053, 541, 1266, 701], "id": 2, "cls": 2, "conf": 0.8329747319221497, "frame_idx": 416, "source": "video/sample.mp4", "class_name": "car"}
{"bbox": [1051, 540, 1265, 702], "id": 2, "cls": 2, "conf": 0.8382128477096558, "frame_idx": 417, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1049, 540, 1266, 702], "id": 2, "cls": 2, "conf": 0.8805363178253174, "frame_idx": 418, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1046, 539, 1266, 703], "id": 2, "cls": 2, "conf": 0.8715322017669678, "frame_idx": 419, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1045, 539, 1267, 704], "id": 2, "cls": 2, "conf": 0.842781662940979, "frame_idx": 420, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1041, 539, 1268, 706], "id": 2, "cls": 2, "conf": 0.8441018462181091, "frame_idx": 421, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1038, 539, 1266, 708], "id": 2, "cls": 2, "conf": 0.7819275856018066, "frame_idx": 422, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1037, 539, 1264, 708], "id": 2, "cls": 2, "conf": 0.8135506510734558, "frame_idx": 423, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1033, 538, 1264, 710], "id": 2, "cls": 2, "conf": 0.8242059350013733, "frame_idx": 424, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1032, 538, 1265, 710], "id": 2, "cls": 2, "conf": 0.7836756110191345, "frame_idx": 425, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1031, 538, 1264, 710], "id": 2, "cls": 2, "conf": 0.8388970494270325, "frame_idx": 426, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1029, 537, 1264, 711], "id": 2, "cls": 2, "conf": 0.7970230579376221, "frame_idx": 427, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1027, 537, 1265, 711], "id": 2, "cls": 2, "conf": 0.7321099638938904, "frame_idx": 428, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1025, 538, 1265, 712], "id": 2, "cls": 2, "conf": 0.7343229651451111, "frame_idx": 429, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1020, 536, 1261, 712], "id": 2, "cls": 2, "conf": 0.787158727645874, "frame_idx": 430, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1018, 537, 1259, 713], "id": 2, "cls": 2, "conf": 0.8460677862167358, "frame_idx": 431, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1013, 536, 1261, 714], "id": 2, "cls": 2, "conf": 0.8292366862297058, "frame_idx": 432, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1011, 536, 1259, 716], "id": 2, "cls": 2, "conf": 0.8152600526809692, "frame_idx": 433, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1008, 535, 1258, 718], "id": 2, "cls": 2, "conf": 0.7996748089790344, "frame_idx": 434, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1007, 535, 1255, 719], "id": 2, "cls": 2, "conf": 0.8389233946800232, "frame_idx": 435, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1008, 535, 1253, 720], "id": 2, "cls": 2, "conf": 0.8631499409675598, "frame_idx": 436, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1002, 534, 1254, 721], "id": 2, "cls": 2, "conf": 0.8657373785972595, "frame_idx": 437, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [998, 534, 1253, 721], "id": 2, "cls": 2, "conf": 0.8603703379631042, "frame_idx": 438, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [995, 532, 1253, 722], "id": 2, "cls": 2, "conf": 0.8645334839820862, "frame_idx": 439, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [994, 532, 1252, 723], "id": 2, "cls": 2, "conf": 0.8768425583839417, "frame_idx": 440, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [991, 530, 1254, 724], "id": 2, "cls": 2, "conf": 0.8931466937065125, "frame_idx": 441, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [986, 530, 1256, 725], "id": 2, "cls": 2, "conf": 0.9038722515106201, "frame_idx": 442, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [985, 530, 1253, 725], "id": 2, "cls": 2, "conf": 0.9084876775741577, "frame_idx": 443, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [983, 530, 1251, 727], "id": 2, "cls": 2, "conf": 0.9005601406097412, "frame_idx": 444, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [980, 529, 1252, 729], "id": 2, "cls": 2, "conf": 0.8964847922325134, "frame_idx": 445, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [977, 529, 1251, 730], "id": 2, "cls": 2, "conf": 0.8957618474960327, "frame_idx": 446, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [974, 529, 1248, 731], "id": 2, "cls": 2, "conf": 0.8834296464920044, "frame_idx": 447, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [970, 527, 1246, 732], "id": 2, "cls": 2, "conf": 0.8654475212097168, "frame_idx": 448, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [966, 526, 1248, 734], "id": 2, "cls": 2, "conf": 0.8783361315727234, "frame_idx": 449, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [962, 526, 1245, 734], "id": 2, "cls": 2, "conf": 0.8720850348472595, "frame_idx": 450, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [959, 525, 1247, 735], "id": 2, "cls": 2, "conf": 0.8909793496131897, "frame_idx": 451, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [957, 525, 1244, 737], "id": 2, "cls": 2, "conf": 0.8911501169204712, "frame_idx": 452, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [954, 525, 1243, 739], "id": 2, "cls": 2, "conf": 0.8941781520843506, "frame_idx": 453, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [948, 524, 1245, 741], "id": 2, "cls": 2, "conf": 0.8771947622299194, "frame_idx": 454, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [943, 524, 1243, 744], "id": 2, "cls": 2, "conf": 0.8804555535316467, "frame_idx": 455, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [940, 523, 1243, 747], "id": 2, "cls": 2, "conf": 0.8785960078239441, "frame_idx": 456, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [934, 522, 1243, 749], "id": 2, "cls": 2, "conf": 0.9005946516990662, "frame_idx": 457, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [931, 521, 1242, 749], "id": 2, "cls": 2, "conf": 0.8925696611404419, "frame_idx": 458, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [928, 521, 1242, 749], "id": 2, "cls": 2, "conf": 0.8925560116767883, "frame_idx": 459, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [925, 522, 1239, 751], "id": 2, "cls": 2, "conf": 0.8871305584907532, "frame_idx": 460, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [917, 523, 1235, 753], "id": 2, "cls": 2, "conf": 0.8800134658813477, "frame_idx": 461, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [913, 523, 1234, 755], "id": 2, "cls": 2, "conf": 0.8769950270652771, "frame_idx": 462, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [912, 522, 1232, 757], "id": 2, "cls": 2, "conf": 0.8771668672561646, "frame_idx": 463, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [907, 521, 1230, 758], "id": 2, "cls": 2, "conf": 0.8780584931373596, "frame_idx": 464, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [902, 520, 1229, 759], "id": 2, "cls": 2, "conf": 0.9009929299354553, "frame_idx": 465, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [892, 520, 1230, 761], "id": 2, "cls": 2, "conf": 0.880210280418396, "frame_idx": 466, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [889, 519, 1227, 762], "id": 2, "cls": 2, "conf": 0.870464026927948, "frame_idx": 467, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [885, 520, 1225, 767], "id": 2, "cls": 2, "conf": 0.9003344774246216, "frame_idx": 468, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [877, 519, 1226, 767], "id": 2, "cls": 2, "conf": 0.920558512210846, "frame_idx": 469, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [875, 519, 1224, 768], "id": 2, "cls": 2, "conf": 0.9045699238777161, "frame_idx": 470, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [868, 518, 1223, 770], "id": 2, "cls": 2, "conf": 0.9074614644050598, "frame_idx": 471, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [864, 517, 1223, 773], "id": 2, "cls": 2, "conf": 0.9183488488197327, "frame_idx": 472, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [857, 516, 1222, 775], "id": 2, "cls": 2, "conf": 0.9148356914520264, "frame_idx": 473, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [853, 516, 1220, 777], "id": 2, "cls": 2, "conf": 0.9280686378479004, "frame_idx": 474, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [841, 514, 1221, 778], "id": 2, "cls": 2, "conf": 0.9198227524757385, "frame_idx": 475, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [838, 513, 1218, 780], "id": 2, "cls": 2, "conf": 0.8942911028862, "frame_idx": 476, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [830, 513, 1218, 782], "id": 2, "cls": 2, "conf": 0.8980481028556824, "frame_idx": 477, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [826, 513, 1213, 787], "id": 2, "cls": 2, "conf": 0.9096649289131165, "frame_idx": 478, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [819, 512, 1212, 793], "id": 2, "cls": 2, "conf": 0.9269362688064575, "frame_idx": 479, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [811, 509, 1213, 794], "id": 2, "cls": 2, "conf": 0.92948979139328, "frame_idx": 480, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [804, 509, 1211, 796], "id": 2, "cls": 2, "conf": 0.9076160788536072, "frame_idx": 481, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [794, 508, 1210, 798], "id": 2, "cls": 2, "conf": 0.9064416289329529, "frame_idx": 482, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [789, 508, 1208, 800], "id": 2, "cls": 2, "conf": 0.9050999879837036, "frame_idx": 483, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [780, 507, 1204, 803], "id": 2, "cls": 2, "conf": 0.9137296080589294, "frame_idx": 484, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [771, 507, 1204, 807], "id": 2, "cls": 2, "conf": 0.9088245630264282, "frame_idx": 485, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [765, 506, 1204, 810], "id": 2, "cls": 2, "conf": 0.9037410020828247, "frame_idx": 486, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [756, 506, 1203, 812], "id": 2, "cls": 2, "conf": 0.9066951870918274, "frame_idx": 487, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [746, 503, 1201, 818], "id": 2, "cls": 2, "conf": 0.914334774017334, "frame_idx": 488, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [735, 503, 1197, 825], "id": 2, "cls": 2, "conf": 0.9123433232307434, "frame_idx": 489, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [725, 502, 1195, 829], "id": 2, "cls": 2, "conf": 0.9094393849372864, "frame_idx": 490, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [717, 498, 1194, 833], "id": 2, "cls": 2, "conf": 0.9276642203330994, "frame_idx": 491, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [705, 499, 1194, 835], "id": 2, "cls": 2, "conf": 0.9282996654510498, "frame_idx": 492, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [696, 498, 1192, 837], "id": 2, "cls": 2, "conf": 0.9298180937767029, "frame_idx": 493, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [684, 496, 1191, 841], "id": 2, "cls": 2, "conf": 0.9258641600608826, "frame_idx": 494, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [673, 496, 1188, 847], "id": 2, "cls": 2, "conf": 0.923974335193634, "frame_idx": 495, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [661, 498, 1186, 856], "id": 2, "cls": 2, "conf": 0.9190512299537659, "frame_idx": 496, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [646, 495, 1183, 859], "id": 2, "cls": 2, "conf": 0.9168910980224609, "frame_idx": 497, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [632, 495, 1183, 868], "id": 2, "cls": 2, "conf": 0.925777018070221, "frame_idx": 498, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [621, 493, 1182, 873], "id": 2, "cls": 2, "conf": 0.9183085560798645, "frame_idx": 499, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [607, 491, 1180, 878], "id": 2, "cls": 2, "conf": 0.9321070909500122, "frame_idx": 500, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [588, 488, 1177, 882], "id": 2, "cls": 2, "conf": 0.9307034611701965, "frame_idx": 501, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [576, 485, 1174, 888], "id": 2, "cls": 2, "conf": 0.9412079453468323, "frame_idx": 502, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [562, 483, 1173, 893], "id": 2, "cls": 2, "conf": 0.9401066303253174, "frame_idx": 503, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [543, 475, 1171, 897], "id": 2, "cls": 2, "conf": 0.9346688389778137, "frame_idx": 504, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [527, 473, 1169, 903], "id": 2, "cls": 2, "conf": 0.9343288540840149, "frame_idx": 505, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [510, 474, 1164, 914], "id": 2, "cls": 2, "conf": 0.9404311180114746, "frame_idx": 506, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [490, 471, 1161, 920], "id": 2, "cls": 2, "conf": 0.9414466619491577, "frame_idx": 507, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [473, 469, 1159, 927], "id": 2, "cls": 2, "conf": 0.9434319138526917, "frame_idx": 508, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [451, 469, 1158, 938], "id": 2, "cls": 2, "conf": 0.9345313906669617, "frame_idx": 509, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [427, 469, 1156, 946], "id": 2, "cls": 2, "conf": 0.9282017946243286, "frame_idx": 510, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [405, 468, 1152, 952], "id": 2, "cls": 2, "conf": 0.9417479038238525, "frame_idx": 511, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [382, 468, 1150, 966], "id": 2, "cls": 2, "conf": 0.9451406598091125, "frame_idx": 512, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [360, 465, 1148, 976], "id": 2, "cls": 2, "conf": 0.9428954720497131, "frame_idx": 513, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [332, 463, 1148, 984], "id": 2, "cls": 2, "conf": 0.9395127892494202, "frame_idx": 514, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [303, 463, 1144, 992], "id": 2, "cls": 2, "conf": 0.9283111095428467, "frame_idx": 515, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [275, 462, 1136, 1003], "id": 2, "cls": 2, "conf": 0.9324305653572083, "frame_idx": 516, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [245, 461, 1131, 1018], "id": 2, "cls": 2, "conf": 0.9247828125953674, "frame_idx": 517, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [208, 453, 1130, 1032], "id": 2, "cls": 2, "conf": 0.9319226741790771, "frame_idx": 518, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [172, 451, 1129, 1045], "id": 2, "cls": 2, "conf": 0.9351807832717896, "frame_idx": 519, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [134, 449, 1125, 1058], "id": 2, "cls": 2, "conf": 0.9390578269958496, "frame_idx": 520, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [91, 445, 1119, 1068], "id": 2, "cls": 2, "conf": 0.947394609451294, "frame_idx": 521, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [46, 443, 1114, 1070], "id": 2, "cls": 2, "conf": 0.9468377232551575, "frame_idx": 522, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 440, 1110, 1072], "id": 2, "cls": 2, "conf": 0.9386428594589233, "frame_idx": 523, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 438, 1105, 1072], "id": 2, "cls": 2, "conf": 0.9346777200698853, "frame_idx": 524, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 435, 1107, 1072], "id": 2, "cls": 2, "conf": 0.9273584485054016, "frame_idx": 525, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 437, 1096, 1071], "id": 2, "cls": 2, "conf": 0.9241657257080078, "frame_idx": 526, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 432, 1095, 1071], "id": 2, "cls": 2, "conf": 0.9355752468109131, "frame_idx": 527, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 428, 1094, 1070], "id": 2, "cls": 2, "conf": 0.9321312308311462, "frame_idx": 528, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 420, 1082, 1073], "id": 2, "cls": 2, "conf": 0.9156169891357422, "frame_idx": 529, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 409, 1077, 1070], "id": 2, "cls": 2, "conf": 0.8867893815040588, "frame_idx": 530, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 388, 1070, 1071], "id": 2, "cls": 2, "conf": 0.9155814051628113, "frame_idx": 531, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 399, 1066, 1072], "id": 2, "cls": 2, "conf": 0.9372450113296509, "frame_idx": 532, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 389, 1057, 1071], "id": 2, "cls": 2, "conf": 0.9160026907920837, "frame_idx": 533, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 390, 1052, 1070], "id": 2, "cls": 2, "conf": 0.9509764313697815, "frame_idx": 534, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 386, 1042, 1070], "id": 2, "cls": 2, "conf": 0.9340437650680542, "frame_idx": 535, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 381, 1038, 1068], "id": 2, "cls": 2, "conf": 0.9404564499855042, "frame_idx": 536, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 375, 1030, 1066], "id": 2, "cls": 2, "conf": 0.9479154348373413, "frame_idx": 537, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 370, 1024, 1067], "id": 2, "cls": 2, "conf": 0.9565911293029785, "frame_idx": 538, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 365, 1016, 1067], "id": 2, "cls": 2, "conf": 0.9608258008956909, "frame_idx": 539, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 357, 1006, 1064], "id": 2, "cls": 2, "conf": 0.9613184332847595, "frame_idx": 540, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [3, 347, 999, 1064], "id": 2, "cls": 2, "conf": 0.9674457311630249, "frame_idx": 541, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 338, 992, 1064], "id": 2, "cls": 2, "conf": 0.97267746925354, "frame_idx": 542, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 328, 983, 1064], "id": 2, "cls": 2, "conf": 0.9624996781349182, "frame_idx": 543, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 319, 972, 1063], "id": 2, "cls": 2, "conf": 0.9598995447158813, "frame_idx": 544, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 307, 959, 1062], "id": 2, "cls": 2, "conf": 0.9514867663383484, "frame_idx": 545, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 292, 948, 1062], "id": 2, "cls": 2, "conf": 0.9584953784942627, "frame_idx": 546, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 279, 935, 1065], "id": 2, "cls": 2, "conf": 0.9569721221923828, "frame_idx": 547, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 270, 927, 1066], "id": 2, "cls": 2, "conf": 0.972572922706604, "frame_idx": 548, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 258, 915, 1066], "id": 2, "cls": 2, "conf": 0.9626525044441223, "frame_idx": 549, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [2, 241, 898, 1064], "id": 2, "cls": 2, "conf": 0.9489137530326843, "frame_idx": 550, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 221, 885, 1065], "id": 2, "cls": 2, "conf": 0.9458200931549072, "frame_idx": 551, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 204, 868, 1066], "id": 2, "cls": 2, "conf": 0.9462317228317261, "frame_idx": 552, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 193, 856, 1066], "id": 2, "cls": 2, "conf": 0.9367963075637817, "frame_idx": 553, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 180, 836, 1067], "id": 2, "cls": 2, "conf": 0.9550886154174805, "frame_idx": 554, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 173, 820, 1068], "id": 2, "cls": 2, "conf": 0.9146677255630493, "frame_idx": 555, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 170, 797, 1066], "id": 2, "cls": 2, "conf": 0.9364038109779358, "frame_idx": 556, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 171, 779, 1067], "id": 2, "cls": 2, "conf": 0.9397339224815369, "frame_idx": 557, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 171, 751, 1068], "id": 2, "cls": 2, "conf": 0.9423396587371826, "frame_idx": 558, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 175, 729, 1067], "id": 2, "cls": 2, "conf": 0.9324960708618164, "frame_idx": 559, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 181, 700, 1066], "id": 2, "cls": 2, "conf": 0.9049985408782959, "frame_idx": 560, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 188, 672, 1067], "id": 2, "cls": 2, "conf": 0.8566305637359619, "frame_idx": 561, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 195, 637, 1067], "id": 2, "cls": 2, "conf": 0.9080706834793091, "frame_idx": 562, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 199, 603, 1068], "id": 2, "cls": 2, "conf": 0.9104960560798645, "frame_idx": 563, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [1, 220, 559, 1063], "id": 2, "cls": 2, "conf": 0.9200505614280701, "frame_idx": 564, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 235, 516, 1067], "id": 2, "cls": 2, "conf": 0.9269247651100159, "frame_idx": 565, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [0, 250, 470, 1065], "id": 2, "cls": 2, "conf": 0.8854379057884216, "frame_idx": 566, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [3, 256, 409, 1066], "id": 2, "cls": 2, "conf": 0.8114883303642273, "frame_idx": 567, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [4, 239, 349, 1070], "id": 2, "cls": 2, "conf": 0.7934050559997559, "frame_idx": 568, "source": "video/sample.mp4", "class_name": "car"}
 | 
			
		||||
{"bbox": [7, 409, 283, 1065], "id": 2, "cls": 2, "conf": 0.7185706496238708, "frame_idx": 569, "source": "video/sample.mp4", "class_name": "car"}
0	models/.gitkeep	Normal file
BIN	models/bangchakv2.mpta	Normal file
Binary file not shown.
@ -1,9 +1,66 @@
fastapi
uvicorn
torch
torchvision
ultralytics
opencv-python
# torch
# torchvision
# ultralytics
# opencv-python
websockets
fastapi[standard]
redis
redis

# Trackers Environment
# pip install -r requirements.txt
ultralytics==8.0.20

# Base ----------------------------------------
gitpython
ipython  # interactive notebook
matplotlib>=3.2.2
numpy==1.23.1
opencv-python>=4.1.1
Pillow>=7.1.2
psutil  # system resources
PyYAML>=5.3.1
requests>=2.23.0
scipy>=1.4.1
thop>=0.1.1  # FLOPs computation
torch>=1.7.0,<=2.5.1  # see https://pytorch.org/get-started/locally (recommended)
torchvision>=0.8.1,<=0.20.1
tqdm>=4.64.0
# protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012

# Logging ---------------------------------------------------------------------
tensorboard>=2.4.1
# clearml>=1.2.0
# comet

# Plotting --------------------------------------------------------------------
pandas>=1.1.4
seaborn>=0.11.0

# StrongSORT ------------------------------------------------------------------
easydict

# torchreid -------------------------------------------------------------------
gdown

# ByteTrack -------------------------------------------------------------------
lap

# OCSORT ----------------------------------------------------------------------
filterpy

# Export ----------------------------------------------------------------------
# onnx>=1.9.0               # ONNX export
# onnx-simplifier>=0.4.1    # ONNX simplifier
# nvidia-pyindex            # TensorRT export
# nvidia-tensorrt           # TensorRT export
# openvino-dev              # OpenVINO export

# Hyperparam search -----------------------------------------------------------
# optuna
# plotly                    # for hp importance and pareto front plots
# kaleido
# joblib
pyzmq
loguru
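Beyond the pinned tracker stack, the list ends with pyzmq and loguru, which suggests tracker results are meant to be streamed to another process rather than only written to disk. A minimal, illustrative sketch of publishing records shaped like the log lines above over a ZeroMQ PUB socket; the endpoint, topic string, and publish helper are assumptions for illustration, not code from this commit:

import json
import zmq
from loguru import logger

ctx = zmq.Context()
sock = ctx.socket(zmq.PUB)
sock.bind("tcp://*:5555")  # assumed endpoint; subscribers connect and filter by topic

def publish(record: dict) -> None:
    # record has the same shape as the JSON log lines above
    sock.send_string("tracks " + json.dumps(record))
    logger.debug("published frame {} id {}", record["frame_idx"], record["id"])

publish({"bbox": [1083, 546, 1274, 686], "id": 2, "cls": 2, "conf": 0.847,
         "frame_idx": 396, "source": "video/sample.mp4", "class_name": "car"})

A receiver would use a SUB socket, call setsockopt_string(zmq.SUBSCRIBE, "tracks"), and json.loads the payload after the topic prefix.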