add StrongSORT Tracker

Pongsatorn Kanjanasantisak 2025-08-10 01:23:09 +07:00
parent ffc2e99678
commit b7d8b3266f
93 changed files with 20230 additions and 6 deletions

@@ -0,0 +1,11 @@
strongsort:
ecc: true
ema_alpha: 0.8962157769329083
max_age: 40
max_dist: 0.1594374041012136
max_iou_dist: 0.5431835667667874
max_unmatched_preds: 0
mc_lambda: 0.995
n_init: 3
nn_budget: 100
conf_thres: 0.5122620708221085
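
A minimal sketch of reading this config, assuming PyYAML is installed and the file is saved as strongsort.yaml; the StrongSORT tracker class itself is not part of this hunk, so only the config handling is illustrated:

import yaml

# Load the hyperparameters above; key names mirror the YAML exactly.
with open('strongsort.yaml') as f:
    cfg = yaml.safe_load(f)['strongsort']

# conf_thres gates detections before tracking; the rest tune association
# (max_dist, max_iou_dist, max_age, n_init, nn_budget) and the appearance
# model's EMA / motion-cost blending (ema_alpha, mc_lambda).
print(cfg['max_age'], cfg['conf_thres'])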


@@ -0,0 +1,122 @@
from __future__ import absolute_import
import torch
from .pcb import *
from .mlfn import *
from .hacnn import *
from .osnet import *
from .senet import *
from .mudeep import *
from .nasnet import *
from .resnet import *
from .densenet import *
from .xception import *
from .osnet_ain import *
from .resnetmid import *
from .shufflenet import *
from .squeezenet import *
from .inceptionv4 import *
from .mobilenetv2 import *
from .resnet_ibn_a import *
from .resnet_ibn_b import *
from .shufflenetv2 import *
from .inceptionresnetv2 import *
__model_factory = {
# image classification models
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x8d': resnext101_32x8d,
'resnet50_fc512': resnet50_fc512,
'se_resnet50': se_resnet50,
'se_resnet50_fc512': se_resnet50_fc512,
'se_resnet101': se_resnet101,
'se_resnext50_32x4d': se_resnext50_32x4d,
'se_resnext101_32x4d': se_resnext101_32x4d,
'densenet121': densenet121,
'densenet169': densenet169,
'densenet201': densenet201,
'densenet161': densenet161,
'densenet121_fc512': densenet121_fc512,
'inceptionresnetv2': inceptionresnetv2,
'inceptionv4': inceptionv4,
'xception': xception,
'resnet50_ibn_a': resnet50_ibn_a,
'resnet50_ibn_b': resnet50_ibn_b,
# lightweight models
'nasnetamobile': nasnetamobile,
'mobilenetv2_x1_0': mobilenetv2_x1_0,
'mobilenetv2_x1_4': mobilenetv2_x1_4,
'shufflenet': shufflenet,
'squeezenet1_0': squeezenet1_0,
'squeezenet1_0_fc512': squeezenet1_0_fc512,
'squeezenet1_1': squeezenet1_1,
'shufflenet_v2_x0_5': shufflenet_v2_x0_5,
'shufflenet_v2_x1_0': shufflenet_v2_x1_0,
'shufflenet_v2_x1_5': shufflenet_v2_x1_5,
'shufflenet_v2_x2_0': shufflenet_v2_x2_0,
# reid-specific models
'mudeep': MuDeep,
'resnet50mid': resnet50mid,
'hacnn': HACNN,
'pcb_p6': pcb_p6,
'pcb_p4': pcb_p4,
'mlfn': mlfn,
'osnet_x1_0': osnet_x1_0,
'osnet_x0_75': osnet_x0_75,
'osnet_x0_5': osnet_x0_5,
'osnet_x0_25': osnet_x0_25,
'osnet_ibn_x1_0': osnet_ibn_x1_0,
'osnet_ain_x1_0': osnet_ain_x1_0,
'osnet_ain_x0_75': osnet_ain_x0_75,
'osnet_ain_x0_5': osnet_ain_x0_5,
'osnet_ain_x0_25': osnet_ain_x0_25
}
def show_avai_models():
"""Displays available models.
Examples::
>>> from torchreid import models
>>> models.show_avai_models()
"""
print(list(__model_factory.keys()))
def build_model(
name, num_classes, loss='softmax', pretrained=True, use_gpu=True
):
"""A function wrapper for building a model.
Args:
name (str): model name.
num_classes (int): number of training identities.
loss (str, optional): loss function to optimize the model. Currently
supports "softmax" and "triplet". Default is "softmax".
pretrained (bool, optional): whether to load ImageNet-pretrained weights.
Default is True.
use_gpu (bool, optional): whether to use gpu. Default is True.
Returns:
nn.Module
Examples::
>>> from torchreid import models
>>> model = models.build_model('resnet50', 751, loss='softmax')
"""
avai_models = list(__model_factory.keys())
if name not in avai_models:
raise KeyError(
'Unknown model: {}. Must be one of {}'.format(name, avai_models)
)
return __model_factory[name](
num_classes=num_classes,
loss=loss,
pretrained=pretrained,
use_gpu=use_gpu
)
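
A short usage sketch for the factory above (pretrained=False avoids a weight download); the import path follows the docstrings. In eval mode each model's forward returns feature embeddings rather than logits, which is what a tracker's appearance branch consumes:

import torch
from torchreid import models

models.show_avai_models()  # prints the keys of __model_factory
model = models.build_model(
    'osnet_x1_0', num_classes=751, loss='softmax', pretrained=False
)
model.eval()  # forward() returns features when self.training is False
with torch.no_grad():
    feats = model(torch.randn(2, 3, 256, 128))  # (2, feature_dim)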


@@ -0,0 +1,380 @@
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import re
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils import model_zoo
__all__ = [
'densenet121', 'densenet169', 'densenet201', 'densenet161',
'densenet121_fc512'
]
model_urls = {
'densenet121':
'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169':
'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201':
'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161':
'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features))
self.add_module('relu1', nn.ReLU(inplace=True))
self.add_module(
'conv1',
nn.Conv2d(
num_input_features,
bn_size * growth_rate,
kernel_size=1,
stride=1,
bias=False
)
)
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
self.add_module('relu2', nn.ReLU(inplace=True))
self.add_module(
'conv2',
nn.Conv2d(
bn_size * growth_rate,
growth_rate,
kernel_size=3,
stride=1,
padding=1,
bias=False
)
)
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(
new_features, p=self.drop_rate, training=self.training
)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
def __init__(
self, num_layers, num_input_features, bn_size, growth_rate, drop_rate
):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i*growth_rate, growth_rate, bn_size,
drop_rate
)
self.add_module('denselayer%d' % (i+1), layer)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module(
'conv',
nn.Conv2d(
num_input_features,
num_output_features,
kernel_size=1,
stride=1,
bias=False
)
)
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
"""Densely connected network.
Reference:
Huang et al. Densely Connected Convolutional Networks. CVPR 2017.
Public keys:
- ``densenet121``: DenseNet121.
- ``densenet169``: DenseNet169.
- ``densenet201``: DenseNet201.
- ``densenet161``: DenseNet161.
- ``densenet121_fc512``: DenseNet121 + FC.
"""
def __init__(
self,
num_classes,
loss,
growth_rate=32,
block_config=(6, 12, 24, 16),
num_init_features=64,
bn_size=4,
drop_rate=0,
fc_dims=None,
dropout_p=None,
**kwargs
):
super(DenseNet, self).__init__()
self.loss = loss
# First convolution
self.features = nn.Sequential(
OrderedDict(
[
(
'conv0',
nn.Conv2d(
3,
num_init_features,
kernel_size=7,
stride=2,
padding=3,
bias=False
)
),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
(
'pool0',
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
),
]
)
)
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate
)
self.features.add_module('denseblock%d' % (i+1), block)
num_features = num_features + num_layers*growth_rate
if i != len(block_config) - 1:
trans = _Transition(
num_input_features=num_features,
num_output_features=num_features // 2
)
self.features.add_module('transition%d' % (i+1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.feature_dim = num_features
self.fc = self._construct_fc_layer(fc_dims, num_features, dropout_p)
# Linear layer
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer.
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
f = self.features(x)
f = F.relu(f, inplace=True)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
# '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
)
for key in list(pretrain_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
pretrain_dict[new_key] = pretrain_dict[key]
del pretrain_dict[key]
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
"""
Dense network configurations:
--
densenet121: num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16)
densenet169: num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32)
densenet201: num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32)
densenet161: num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24)
"""
def densenet121(num_classes, loss='softmax', pretrained=True, **kwargs):
model = DenseNet(
num_classes=num_classes,
loss=loss,
num_init_features=64,
growth_rate=32,
block_config=(6, 12, 24, 16),
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['densenet121'])
return model
def densenet169(num_classes, loss='softmax', pretrained=True, **kwargs):
model = DenseNet(
num_classes=num_classes,
loss=loss,
num_init_features=64,
growth_rate=32,
block_config=(6, 12, 32, 32),
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['densenet169'])
return model
def densenet201(num_classes, loss='softmax', pretrained=True, **kwargs):
model = DenseNet(
num_classes=num_classes,
loss=loss,
num_init_features=64,
growth_rate=32,
block_config=(6, 12, 48, 32),
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['densenet201'])
return model
def densenet161(num_classes, loss='softmax', pretrained=True, **kwargs):
model = DenseNet(
num_classes=num_classes,
loss=loss,
num_init_features=96,
growth_rate=48,
block_config=(6, 12, 36, 24),
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['densenet161'])
return model
def densenet121_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = DenseNet(
num_classes=num_classes,
loss=loss,
num_init_features=64,
growth_rate=32,
block_config=(6, 12, 24, 16),
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['densenet121'])
return model
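
The regex in init_pretrained_weights exists because old torchvision DenseNet checkpoints name sub-layers 'norm.1', 'conv.2', etc., while modern module names cannot contain dots; a quick standalone check of the rename it performs:

import re

pattern = re.compile(
    r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
)
old_key = 'features.denseblock1.denselayer1.norm.1.weight'
m = pattern.match(old_key)
print(m.group(1) + m.group(2))
# -> features.denseblock1.denselayer1.norm1.weight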


@@ -0,0 +1,414 @@
from __future__ import division, absolute_import
import torch
from torch import nn
from torch.nn import functional as F
__all__ = ['HACNN']
class ConvBlock(nn.Module):
"""Basic convolutional block.
convolution + batch normalization + relu.
Args:
in_c (int): number of input channels.
out_c (int): number of output channels.
k (int or tuple): kernel size.
s (int or tuple): stride.
p (int or tuple): padding.
"""
def __init__(self, in_c, out_c, k, s=1, p=0):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p)
self.bn = nn.BatchNorm2d(out_c)
def forward(self, x):
return F.relu(self.bn(self.conv(x)))
class InceptionA(nn.Module):
def __init__(self, in_channels, out_channels):
super(InceptionA, self).__init__()
mid_channels = out_channels // 4
self.stream1 = nn.Sequential(
ConvBlock(in_channels, mid_channels, 1),
ConvBlock(mid_channels, mid_channels, 3, p=1),
)
self.stream2 = nn.Sequential(
ConvBlock(in_channels, mid_channels, 1),
ConvBlock(mid_channels, mid_channels, 3, p=1),
)
self.stream3 = nn.Sequential(
ConvBlock(in_channels, mid_channels, 1),
ConvBlock(mid_channels, mid_channels, 3, p=1),
)
self.stream4 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1),
ConvBlock(in_channels, mid_channels, 1),
)
def forward(self, x):
s1 = self.stream1(x)
s2 = self.stream2(x)
s3 = self.stream3(x)
s4 = self.stream4(x)
y = torch.cat([s1, s2, s3, s4], dim=1)
return y
class InceptionB(nn.Module):
def __init__(self, in_channels, out_channels):
super(InceptionB, self).__init__()
mid_channels = out_channels // 4
self.stream1 = nn.Sequential(
ConvBlock(in_channels, mid_channels, 1),
ConvBlock(mid_channels, mid_channels, 3, s=2, p=1),
)
self.stream2 = nn.Sequential(
ConvBlock(in_channels, mid_channels, 1),
ConvBlock(mid_channels, mid_channels, 3, p=1),
ConvBlock(mid_channels, mid_channels, 3, s=2, p=1),
)
self.stream3 = nn.Sequential(
nn.MaxPool2d(3, stride=2, padding=1),
ConvBlock(in_channels, mid_channels * 2, 1),
)
def forward(self, x):
s1 = self.stream1(x)
s2 = self.stream2(x)
s3 = self.stream3(x)
y = torch.cat([s1, s2, s3], dim=1)
return y
class SpatialAttn(nn.Module):
"""Spatial Attention (Sec. 3.1.I.1)"""
def __init__(self):
super(SpatialAttn, self).__init__()
self.conv1 = ConvBlock(1, 1, 3, s=2, p=1)
self.conv2 = ConvBlock(1, 1, 1)
def forward(self, x):
# global cross-channel averaging
x = x.mean(1, keepdim=True)
# 3-by-3 conv
x = self.conv1(x)
# bilinear resizing
x = F.interpolate(
x, (x.size(2) * 2, x.size(3) * 2),
mode='bilinear',
align_corners=True
)
# scaling conv
x = self.conv2(x)
return x
class ChannelAttn(nn.Module):
"""Channel Attention (Sec. 3.1.I.2)"""
def __init__(self, in_channels, reduction_rate=16):
super(ChannelAttn, self).__init__()
assert in_channels % reduction_rate == 0
self.conv1 = ConvBlock(in_channels, in_channels // reduction_rate, 1)
self.conv2 = ConvBlock(in_channels // reduction_rate, in_channels, 1)
def forward(self, x):
# squeeze operation (global average pooling)
x = F.avg_pool2d(x, x.size()[2:])
# excitation operation (2 conv layers)
x = self.conv1(x)
x = self.conv2(x)
return x
class SoftAttn(nn.Module):
"""Soft Attention (Sec. 3.1.I)
Aim: Spatial Attention + Channel Attention
Output: attention maps with shape identical to input.
"""
def __init__(self, in_channels):
super(SoftAttn, self).__init__()
self.spatial_attn = SpatialAttn()
self.channel_attn = ChannelAttn(in_channels)
self.conv = ConvBlock(in_channels, in_channels, 1)
def forward(self, x):
y_spatial = self.spatial_attn(x)
y_channel = self.channel_attn(x)
y = y_spatial * y_channel
y = torch.sigmoid(self.conv(y))
return y
class HardAttn(nn.Module):
"""Hard Attention (Sec. 3.1.II)"""
def __init__(self, in_channels):
super(HardAttn, self).__init__()
self.fc = nn.Linear(in_channels, 4 * 2)
self.init_params()
def init_params(self):
self.fc.weight.data.zero_()
self.fc.bias.data.copy_(
torch.tensor(
[0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float
)
)
def forward(self, x):
# squeeze operation (global average pooling)
x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1))
# predict transformation parameters
theta = torch.tanh(self.fc(x))
theta = theta.view(-1, 4, 2)
return theta
class HarmAttn(nn.Module):
"""Harmonious Attention (Sec. 3.1)"""
def __init__(self, in_channels):
super(HarmAttn, self).__init__()
self.soft_attn = SoftAttn(in_channels)
self.hard_attn = HardAttn(in_channels)
def forward(self, x):
y_soft_attn = self.soft_attn(x)
theta = self.hard_attn(x)
return y_soft_attn, theta
class HACNN(nn.Module):
"""Harmonious Attention Convolutional Neural Network.
Reference:
Li et al. Harmonious Attention Network for Person Re-identification. CVPR 2018.
Public keys:
- ``hacnn``: HACNN.
"""
# Args:
# num_classes (int): number of classes to predict
# nchannels (list): number of channels AFTER concatenation
# feat_dim (int): feature dimension for a single stream
# learn_region (bool): whether to learn region features (i.e. local branch)
def __init__(
self,
num_classes,
loss='softmax',
nchannels=[128, 256, 384],
feat_dim=512,
learn_region=True,
use_gpu=True,
**kwargs
):
super(HACNN, self).__init__()
self.loss = loss
self.learn_region = learn_region
self.use_gpu = use_gpu
self.conv = ConvBlock(3, 32, 3, s=2, p=1)
# Construct Inception + HarmAttn blocks
# ============== Block 1 ==============
self.inception1 = nn.Sequential(
InceptionA(32, nchannels[0]),
InceptionB(nchannels[0], nchannels[0]),
)
self.ha1 = HarmAttn(nchannels[0])
# ============== Block 2 ==============
self.inception2 = nn.Sequential(
InceptionA(nchannels[0], nchannels[1]),
InceptionB(nchannels[1], nchannels[1]),
)
self.ha2 = HarmAttn(nchannels[1])
# ============== Block 3 ==============
self.inception3 = nn.Sequential(
InceptionA(nchannels[1], nchannels[2]),
InceptionB(nchannels[2], nchannels[2]),
)
self.ha3 = HarmAttn(nchannels[2])
self.fc_global = nn.Sequential(
nn.Linear(nchannels[2], feat_dim),
nn.BatchNorm1d(feat_dim),
nn.ReLU(),
)
self.classifier_global = nn.Linear(feat_dim, num_classes)
if self.learn_region:
self.init_scale_factors()
self.local_conv1 = InceptionB(32, nchannels[0])
self.local_conv2 = InceptionB(nchannels[0], nchannels[1])
self.local_conv3 = InceptionB(nchannels[1], nchannels[2])
self.fc_local = nn.Sequential(
nn.Linear(nchannels[2] * 4, feat_dim),
nn.BatchNorm1d(feat_dim),
nn.ReLU(),
)
self.classifier_local = nn.Linear(feat_dim, num_classes)
self.feat_dim = feat_dim * 2
else:
self.feat_dim = feat_dim
def init_scale_factors(self):
# initialize scale factors (s_w, s_h) for four regions
self.scale_factors = []
self.scale_factors.append(
torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float)
)
self.scale_factors.append(
torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float)
)
self.scale_factors.append(
torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float)
)
self.scale_factors.append(
torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float)
)
def stn(self, x, theta):
"""Performs spatial transform
x: (batch, channel, height, width)
theta: (batch, 2, 3)
"""
grid = F.affine_grid(theta, x.size())
x = F.grid_sample(x, grid)
return x
def transform_theta(self, theta_i, region_idx):
"""Transforms theta to include (s_w, s_h), resulting in (batch, 2, 3)"""
scale_factors = self.scale_factors[region_idx]
theta = torch.zeros(theta_i.size(0), 2, 3)
theta[:, :, :2] = scale_factors
theta[:, :, -1] = theta_i
if self.use_gpu:
theta = theta.cuda()
return theta
def forward(self, x):
assert x.size(2) == 160 and x.size(3) == 64, \
'Input size does not match, expected (160, 64) but got ({}, {})'.format(x.size(2), x.size(3))
x = self.conv(x)
# ============== Block 1 ==============
# global branch
x1 = self.inception1(x)
x1_attn, x1_theta = self.ha1(x1)
x1_out = x1 * x1_attn
# local branch
if self.learn_region:
x1_local_list = []
for region_idx in range(4):
x1_theta_i = x1_theta[:, region_idx, :]
x1_theta_i = self.transform_theta(x1_theta_i, region_idx)
x1_trans_i = self.stn(x, x1_theta_i)
x1_trans_i = F.interpolate(
x1_trans_i, (24, 28), mode='bilinear', align_corners=True
)
x1_local_i = self.local_conv1(x1_trans_i)
x1_local_list.append(x1_local_i)
# ============== Block 2 ==============
# global branch
x2 = self.inception2(x1_out)
x2_attn, x2_theta = self.ha2(x2)
x2_out = x2 * x2_attn
# local branch
if self.learn_region:
x2_local_list = []
for region_idx in range(4):
x2_theta_i = x2_theta[:, region_idx, :]
x2_theta_i = self.transform_theta(x2_theta_i, region_idx)
x2_trans_i = self.stn(x1_out, x2_theta_i)
x2_trans_i = F.interpolate(
x2_trans_i, (12, 14), mode='bilinear', align_corners=True
)
x2_local_i = x2_trans_i + x1_local_list[region_idx]
x2_local_i = self.local_conv2(x2_local_i)
x2_local_list.append(x2_local_i)
# ============== Block 3 ==============
# global branch
x3 = self.inception3(x2_out)
x3_attn, x3_theta = self.ha3(x3)
x3_out = x3 * x3_attn
# local branch
if self.learn_region:
x3_local_list = []
for region_idx in range(4):
x3_theta_i = x3_theta[:, region_idx, :]
x3_theta_i = self.transform_theta(x3_theta_i, region_idx)
x3_trans_i = self.stn(x2_out, x3_theta_i)
x3_trans_i = F.interpolate(
x3_trans_i, (6, 7), mode='bilinear', align_corners=True
)
x3_local_i = x3_trans_i + x2_local_list[region_idx]
x3_local_i = self.local_conv3(x3_local_i)
x3_local_list.append(x3_local_i)
# ============== Feature generation ==============
# global branch
x_global = F.avg_pool2d(
x3_out, x3_out.size()[2:]
).view(x3_out.size(0), x3_out.size(1))
x_global = self.fc_global(x_global)
# local branch
if self.learn_region:
x_local_list = []
for region_idx in range(4):
x_local_i = x3_local_list[region_idx]
x_local_i = F.avg_pool2d(
x_local_i, x_local_i.size()[2:]
).view(x_local_i.size(0), -1)
x_local_list.append(x_local_i)
x_local = torch.cat(x_local_list, 1)
x_local = self.fc_local(x_local)
if not self.training:
# l2 normalization before concatenation
if self.learn_region:
x_global = x_global / x_global.norm(p=2, dim=1, keepdim=True)
x_local = x_local / x_local.norm(p=2, dim=1, keepdim=True)
return torch.cat([x_global, x_local], 1)
else:
return x_global
prelogits_global = self.classifier_global(x_global)
if self.learn_region:
prelogits_local = self.classifier_local(x_local)
if self.loss == 'softmax':
if self.learn_region:
return (prelogits_global, prelogits_local)
else:
return prelogits_global
elif self.loss == 'triplet':
if self.learn_region:
return (prelogits_global, prelogits_local), (x_global, x_local)
else:
return prelogits_global, x_global
else:
raise KeyError("Unsupported loss: {}".format(self.loss))


@@ -0,0 +1,361 @@
"""
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""
from __future__ import division, absolute_import
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['inceptionresnetv2']
pretrained_settings = {
'inceptionresnetv2': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
'imagenet+background': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1001
}
}
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False
) # verify bias false
self.bn = nn.BatchNorm2d(
out_planes,
eps=0.001, # value found in tensorflow
momentum=0.1, # default pytorch value
affine=True
)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_5b(nn.Module):
def __init__(self):
super(Mixed_5b, self).__init__()
self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(192, 48, kernel_size=1, stride=1),
BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)
)
self.branch2 = nn.Sequential(
BasicConv2d(192, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(192, 64, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block35(nn.Module):
def __init__(self, scale=1.0):
super(Block35, self).__init__()
self.scale = scale
self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(320, 32, kernel_size=1, stride=1),
BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(320, 32, kernel_size=1, stride=1),
BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),
BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)
)
self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.relu(out)
return out
class Mixed_6a(nn.Module):
def __init__(self):
super(Mixed_6a, self).__init__()
self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(320, 256, kernel_size=1, stride=1),
BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),
BasicConv2d(256, 384, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Block17(nn.Module):
def __init__(self, scale=1.0):
super(Block17, self).__init__()
self.scale = scale
self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1088, 128, kernel_size=1, stride=1),
BasicConv2d(
128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)
),
BasicConv2d(
160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)
)
)
self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.relu(out)
return out
class Mixed_7a(nn.Module):
def __init__(self):
super(Mixed_7a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 384, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 288, kernel_size=3, stride=2)
)
self.branch2 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1),
BasicConv2d(288, 320, kernel_size=3, stride=2)
)
self.branch3 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block8(nn.Module):
def __init__(self, scale=1.0, noReLU=False):
super(Block8, self).__init__()
self.scale = scale
self.noReLU = noReLU
self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(2080, 192, kernel_size=1, stride=1),
BasicConv2d(
192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)
),
BasicConv2d(
224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)
)
)
self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)
if not self.noReLU:
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
if not self.noReLU:
out = self.relu(out)
return out
# ----------------
# Model Definition
# ----------------
class InceptionResNetV2(nn.Module):
"""Inception-ResNet-V2.
Reference:
Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual
Connections on Learning. AAAI 2017.
Public keys:
- ``inceptionresnetv2``: Inception-ResNet-V2.
"""
def __init__(self, num_classes, loss='softmax', **kwargs):
super(InceptionResNetV2, self).__init__()
self.loss = loss
# Modules
self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
self.conv2d_2b = BasicConv2d(
32, 64, kernel_size=3, stride=1, padding=1
)
self.maxpool_3a = nn.MaxPool2d(3, stride=2)
self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
self.maxpool_5a = nn.MaxPool2d(3, stride=2)
self.mixed_5b = Mixed_5b()
self.repeat = nn.Sequential(
Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),
Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),
Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17),
Block35(scale=0.17)
)
self.mixed_6a = Mixed_6a()
self.repeat_1 = nn.Sequential(
Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10),
Block17(scale=0.10), Block17(scale=0.10)
)
self.mixed_7a = Mixed_7a()
self.repeat_2 = nn.Sequential(
Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20),
Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20),
Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20)
)
self.block8 = Block8(noReLU=True)
self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(1536, num_classes)
def load_imagenet_weights(self):
settings = pretrained_settings['inceptionresnetv2']['imagenet']
pretrain_dict = model_zoo.load_url(settings['url'])
model_dict = self.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
self.load_state_dict(model_dict)
def featuremaps(self, x):
x = self.conv2d_1a(x)
x = self.conv2d_2a(x)
x = self.conv2d_2b(x)
x = self.maxpool_3a(x)
x = self.conv2d_3b(x)
x = self.conv2d_4a(x)
x = self.maxpool_5a(x)
x = self.mixed_5b(x)
x = self.repeat(x)
x = self.mixed_6a(x)
x = self.repeat_1(x)
x = self.mixed_7a(x)
x = self.repeat_2(x)
x = self.block8(x)
x = self.conv2d_7b(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def inceptionresnetv2(num_classes, loss='softmax', pretrained=True, **kwargs):
model = InceptionResNetV2(num_classes=num_classes, loss=loss, **kwargs)
if pretrained:
model.load_imagenet_weights()
return model
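
load_imagenet_weights uses the partial-loading trick shared by the other backbones in this commit: only checkpoint tensors whose name and shape both match the current model are copied, so the re-ID classifier head stays randomly initialized. The same filter in isolation, on a toy module:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
ckpt = {'weight': torch.zeros(4, 4), 'bias': torch.zeros(2)}  # 'weight' has the wrong shape
model_dict = model.state_dict()
filtered = {
    k: v for k, v in ckpt.items()
    if k in model_dict and model_dict[k].size() == v.size()
}
print(sorted(filtered))  # ['bias'] -- the mismatched 'weight' is dropped
model_dict.update(filtered)
model.load_state_dict(model_dict)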


@@ -0,0 +1,381 @@
from __future__ import division, absolute_import
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['inceptionv4']
"""
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""
pretrained_settings = {
'inceptionv4': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
'imagenet+background': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1001
}
}
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False
) # verify bias false
self.bn = nn.BatchNorm2d(
out_planes,
eps=0.001, # value found in tensorflow
momentum=0.1, # default pytorch value
affine=True
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_3a(nn.Module):
def __init__(self):
super(Mixed_3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_4a(nn.Module):
def __init__(self):
super(Mixed_4a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
BasicConv2d(64, 96, kernel_size=(3, 3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_5a(nn.Module):
def __init__(self):
super(Mixed_5a, self).__init__()
self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class Inception_A(nn.Module):
def __init__(self):
super(Inception_A, self).__init__()
self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_A(nn.Module):
def __init__(self):
super(Reduction_A, self).__init__()
self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(384, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1),
BasicConv2d(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_B(nn.Module):
def __init__(self):
super(Inception_B, self).__init__()
self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(
192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)
),
BasicConv2d(
224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)
)
)
self.branch2 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(
192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)
),
BasicConv2d(
192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)
),
BasicConv2d(
224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)
),
BasicConv2d(
224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)
)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_B(nn.Module):
def __init__(self):
super(Reduction_B, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 256, kernel_size=1, stride=1),
BasicConv2d(
256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)
),
BasicConv2d(
256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)
),
BasicConv2d(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_C(nn.Module):
def __init__(self):
super(Inception_C, self).__init__()
self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = BasicConv2d(
384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)
)
self.branch1_1b = BasicConv2d(
384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)
)
self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = BasicConv2d(
384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)
)
self.branch2_2 = BasicConv2d(
448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)
)
self.branch2_3a = BasicConv2d(
512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)
)
self.branch2_3b = BasicConv2d(
512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
"""Inception-v4.
Reference:
Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual
Connections on Learning. AAAI 2017.
Public keys:
- ``inceptionv4``: InceptionV4.
"""
def __init__(self, num_classes, loss, **kwargs):
super(InceptionV4, self).__init__()
self.loss = loss
self.features = nn.Sequential(
BasicConv2d(3, 32, kernel_size=3, stride=2),
BasicConv2d(32, 32, kernel_size=3, stride=1),
BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
Mixed_3a(),
Mixed_4a(),
Mixed_5a(),
Inception_A(),
Inception_A(),
Inception_A(),
Inception_A(),
Reduction_A(), # Mixed_6a
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Reduction_B(), # Mixed_7a
Inception_C(),
Inception_C(),
Inception_C()
)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(1536, num_classes)
def forward(self, x):
f = self.features(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def inceptionv4(num_classes, loss='softmax', pretrained=True, **kwargs):
model = InceptionV4(num_classes, loss, **kwargs)
if pretrained:
model_url = pretrained_settings['inceptionv4']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
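
The 1x7 / 7x1 pairs in Inception_B (and the 1x3 / 3x1 pairs in Inception_C) are factorized large kernels: with paddings (0, 3) and (3, 0) they preserve the spatial size while costing far fewer parameters than a full 7x7. A quick shape check:

import torch
import torch.nn as nn

conv1x7 = nn.Conv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3), bias=False)
conv7x1 = nn.Conv2d(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0), bias=False)
x = torch.randn(1, 192, 17, 17)
print(conv7x1(conv1x7(x)).shape)  # torch.Size([1, 256, 17, 17]): H x W preserved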


@@ -0,0 +1,269 @@
from __future__ import division, absolute_import
import torch
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import functional as F
__all__ = ['mlfn']
model_urls = {
# training epoch = 5, top1 = 51.6
'imagenet':
'https://mega.nz/#!YHxAhaxC!yu9E6zWl0x5zscSouTdbZu8gdFFytDdl-RAdD2DEfpk',
}
class MLFNBlock(nn.Module):
def __init__(
self, in_channels, out_channels, stride, fsm_channels, groups=32
):
super(MLFNBlock, self).__init__()
self.groups = groups
mid_channels = out_channels // 2
# Factor Modules
self.fm_conv1 = nn.Conv2d(in_channels, mid_channels, 1, bias=False)
self.fm_bn1 = nn.BatchNorm2d(mid_channels)
self.fm_conv2 = nn.Conv2d(
mid_channels,
mid_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=self.groups
)
self.fm_bn2 = nn.BatchNorm2d(mid_channels)
self.fm_conv3 = nn.Conv2d(mid_channels, out_channels, 1, bias=False)
self.fm_bn3 = nn.BatchNorm2d(out_channels)
# Factor Selection Module
self.fsm = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, fsm_channels[0], 1),
nn.BatchNorm2d(fsm_channels[0]),
nn.ReLU(inplace=True),
nn.Conv2d(fsm_channels[0], fsm_channels[1], 1),
nn.BatchNorm2d(fsm_channels[1]),
nn.ReLU(inplace=True),
nn.Conv2d(fsm_channels[1], self.groups, 1),
nn.BatchNorm2d(self.groups),
nn.Sigmoid(),
)
self.downsample = None
if in_channels != out_channels or stride > 1:
self.downsample = nn.Sequential(
nn.Conv2d(
in_channels, out_channels, 1, stride=stride, bias=False
),
nn.BatchNorm2d(out_channels),
)
def forward(self, x):
residual = x
s = self.fsm(x)
# reduce dimension
x = self.fm_conv1(x)
x = self.fm_bn1(x)
x = F.relu(x, inplace=True)
# group convolution
x = self.fm_conv2(x)
x = self.fm_bn2(x)
x = F.relu(x, inplace=True)
# factor selection
b, c = x.size(0), x.size(1)
n = c // self.groups
ss = s.repeat(1, n, 1, 1) # from (b, g, 1, 1) to (b, g*n=c, 1, 1)
ss = ss.view(b, n, self.groups, 1, 1)
ss = ss.permute(0, 2, 1, 3, 4).contiguous()
ss = ss.view(b, c, 1, 1)
x = ss * x
# recover dimension
x = self.fm_conv3(x)
x = self.fm_bn3(x)
x = F.relu(x, inplace=True)
if self.downsample is not None:
residual = self.downsample(residual)
return F.relu(residual + x, inplace=True), s
class MLFN(nn.Module):
"""Multi-Level Factorisation Net.
Reference:
Chang et al. Multi-Level Factorisation Net for
Person Re-Identification. CVPR 2018.
Public keys:
- ``mlfn``: MLFN (Multi-Level Factorisation Net).
"""
def __init__(
self,
num_classes,
loss='softmax',
groups=32,
channels=[64, 256, 512, 1024, 2048],
embed_dim=1024,
**kwargs
):
super(MLFN, self).__init__()
self.loss = loss
self.groups = groups
# first convolutional layer
self.conv1 = nn.Conv2d(3, channels[0], 7, stride=2, padding=3)
self.bn1 = nn.BatchNorm2d(channels[0])
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
# main body
self.feature = nn.ModuleList(
[
# layer 1-3
MLFNBlock(channels[0], channels[1], 1, [128, 64], self.groups),
MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups),
MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups),
# layer 4-7
MLFNBlock(
channels[1], channels[2], 2, [256, 128], self.groups
),
MLFNBlock(
channels[2], channels[2], 1, [256, 128], self.groups
),
MLFNBlock(
channels[2], channels[2], 1, [256, 128], self.groups
),
MLFNBlock(
channels[2], channels[2], 1, [256, 128], self.groups
),
# layer 8-13
MLFNBlock(
channels[2], channels[3], 2, [512, 128], self.groups
),
MLFNBlock(
channels[3], channels[3], 1, [512, 128], self.groups
),
MLFNBlock(
channels[3], channels[3], 1, [512, 128], self.groups
),
MLFNBlock(
channels[3], channels[3], 1, [512, 128], self.groups
),
MLFNBlock(
channels[3], channels[3], 1, [512, 128], self.groups
),
MLFNBlock(
channels[3], channels[3], 1, [512, 128], self.groups
),
# layer 14-16
MLFNBlock(
channels[3], channels[4], 2, [512, 128], self.groups
),
MLFNBlock(
channels[4], channels[4], 1, [512, 128], self.groups
),
MLFNBlock(
channels[4], channels[4], 1, [512, 128], self.groups
),
]
)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
# projection functions
self.fc_x = nn.Sequential(
nn.Conv2d(channels[4], embed_dim, 1, bias=False),
nn.BatchNorm2d(embed_dim),
nn.ReLU(inplace=True),
)
self.fc_s = nn.Sequential(
nn.Conv2d(self.groups * 16, embed_dim, 1, bias=False),
nn.BatchNorm2d(embed_dim),
nn.ReLU(inplace=True),
)
self.classifier = nn.Linear(embed_dim, num_classes)
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x, inplace=True)
x = self.maxpool(x)
s_hat = []
for block in self.feature:
x, s = block(x)
s_hat.append(s)
s_hat = torch.cat(s_hat, 1)
x = self.global_avgpool(x)
x = self.fc_x(x)
s_hat = self.fc_s(s_hat)
v = (x+s_hat) * 0.5
v = v.view(v.size(0), -1)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def mlfn(num_classes, loss='softmax', pretrained=True, **kwargs):
model = MLFN(num_classes, loss, **kwargs)
if pretrained:
# init_pretrained_weights(model, model_urls['imagenet'])
import warnings
warnings.warn(
'The imagenet pretrained weights need to be manually downloaded from {}'
.format(model_urls['imagenet'])
)
return model
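
The trickiest part of MLFNBlock is the factor-selection broadcast: the FSM emits one gate per group, shape (b, groups, 1, 1), and the repeat/view/permute sequence expands it to (b, c, 1, 1) so that each contiguous block of c // groups channels (one group of the grouped conv) shares its gate. A standalone check with 2 groups of 3 channels:

import torch

b, g, n = 1, 2, 3  # batch, groups, channels per group (c = g * n = 6)
s = torch.tensor([10.0, 20.0]).view(b, g, 1, 1)  # one gate per group
ss = s.repeat(1, n, 1, 1)                        # (b, g*n, 1, 1)
ss = ss.view(b, n, g, 1, 1).permute(0, 2, 1, 3, 4).contiguous().view(b, g * n, 1, 1)
print(ss.flatten())  # tensor([10., 10., 10., 20., 20., 20.])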


@@ -0,0 +1,274 @@
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import functional as F
__all__ = ['mobilenetv2_x1_0', 'mobilenetv2_x1_4']
model_urls = {
# 1.0: top-1 71.3
'mobilenetv2_x1_0':
'https://mega.nz/#!NKp2wAIA!1NH1pbNzY_M2hVk_hdsxNM1NUOWvvGPHhaNr-fASF6c',
# 1.4: top-1 73.9
'mobilenetv2_x1_4':
'https://mega.nz/#!RGhgEIwS!xN2s2ZdyqI6vQ3EwgmRXLEW3khr9tpXg96G9SUJugGk',
}
class ConvBlock(nn.Module):
"""Basic convolutional block.
convolution (bias discarded) + batch normalization + relu6.
Args:
in_c (int): number of input channels.
out_c (int): number of output channels.
k (int or tuple): kernel size.
s (int or tuple): stride.
p (int or tuple): padding.
g (int): number of blocked connections from input channels
to output channels (default: 1).
"""
def __init__(self, in_c, out_c, k, s=1, p=0, g=1):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(
in_c, out_c, k, stride=s, padding=p, bias=False, groups=g
)
self.bn = nn.BatchNorm2d(out_c)
def forward(self, x):
return F.relu6(self.bn(self.conv(x)))
class Bottleneck(nn.Module):
def __init__(self, in_channels, out_channels, expansion_factor, stride=1):
super(Bottleneck, self).__init__()
mid_channels = in_channels * expansion_factor
self.use_residual = stride == 1 and in_channels == out_channels
self.conv1 = ConvBlock(in_channels, mid_channels, 1)
self.dwconv2 = ConvBlock(
mid_channels, mid_channels, 3, stride, 1, g=mid_channels
)
self.conv3 = nn.Sequential(
nn.Conv2d(mid_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
)
def forward(self, x):
m = self.conv1(x)
m = self.dwconv2(m)
m = self.conv3(m)
if self.use_residual:
return x + m
else:
return m
class MobileNetV2(nn.Module):
"""MobileNetV2.
Reference:
Sandler et al. MobileNetV2: Inverted Residuals and
Linear Bottlenecks. CVPR 2018.
Public keys:
- ``mobilenetv2_x1_0``: MobileNetV2 x1.0.
- ``mobilenetv2_x1_4``: MobileNetV2 x1.4.
"""
def __init__(
self,
num_classes,
width_mult=1,
loss='softmax',
fc_dims=None,
dropout_p=None,
**kwargs
):
super(MobileNetV2, self).__init__()
self.loss = loss
self.in_channels = int(32 * width_mult)
self.feature_dim = int(1280 * width_mult) if width_mult > 1 else 1280
# construct layers
self.conv1 = ConvBlock(3, self.in_channels, 3, s=2, p=1)
self.conv2 = self._make_layer(
Bottleneck, 1, int(16 * width_mult), 1, 1
)
self.conv3 = self._make_layer(
Bottleneck, 6, int(24 * width_mult), 2, 2
)
self.conv4 = self._make_layer(
Bottleneck, 6, int(32 * width_mult), 3, 2
)
self.conv5 = self._make_layer(
Bottleneck, 6, int(64 * width_mult), 4, 2
)
self.conv6 = self._make_layer(
Bottleneck, 6, int(96 * width_mult), 3, 1
)
self.conv7 = self._make_layer(
Bottleneck, 6, int(160 * width_mult), 3, 2
)
self.conv8 = self._make_layer(
Bottleneck, 6, int(320 * width_mult), 1, 1
)
self.conv9 = ConvBlock(self.in_channels, self.feature_dim, 1)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = self._construct_fc_layer(
fc_dims, self.feature_dim, dropout_p
)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _make_layer(self, block, t, c, n, s):
# t: expansion factor
# c: output channels
# n: number of blocks
# s: stride for first layer
layers = []
layers.append(block(self.in_channels, c, t, s))
self.in_channels = c
for i in range(1, n):
layers.append(block(self.in_channels, c, t))
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer.
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.conv6(x)
x = self.conv7(x)
x = self.conv8(x)
x = self.conv9(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def mobilenetv2_x1_0(num_classes, loss, pretrained=True, **kwargs):
model = MobileNetV2(
num_classes,
loss=loss,
width_mult=1,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
# init_pretrained_weights(model, model_urls['mobilenetv2_x1_0'])
import warnings
warnings.warn(
'The imagenet pretrained weights need to be manually downloaded from {}'
.format(model_urls['mobilenetv2_x1_0'])
)
return model
def mobilenetv2_x1_4(num_classes, loss, pretrained=True, **kwargs):
model = MobileNetV2(
num_classes,
loss=loss,
width_mult=1.4,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
# init_pretrained_weights(model, model_urls['mobilenetv2_x1_4'])
import warnings
warnings.warn(
'The imagenet pretrained weights need to be manually downloaded from {}'
.format(model_urls['mobilenetv2_x1_4'])
)
return model
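
Bottleneck above is MobileNetV2's inverted residual: a 1x1 expansion by expansion_factor, a depthwise 3x3 (g=mid_channels), and a linear 1x1 projection, with the skip connection added only when stride == 1 and the channel counts match. A quick check of both cases, run in the same module as the classes above:

import torch
# from mobilenetv2 import Bottleneck  # exact import path depends on where this file lives

blk_skip = Bottleneck(32, 32, expansion_factor=6, stride=1)  # residual path active
blk_down = Bottleneck(32, 64, expansion_factor=6, stride=2)  # plain path, downsamples
x = torch.randn(1, 32, 16, 16)
print(blk_skip(x).shape)  # torch.Size([1, 32, 16, 16])
print(blk_down(x).shape)  # torch.Size([1, 64, 8, 8])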


@@ -0,0 +1,206 @@
from __future__ import division, absolute_import
import torch
from torch import nn
from torch.nn import functional as F
__all__ = ['MuDeep']
class ConvBlock(nn.Module):
"""Basic convolutional block.
convolution + batch normalization + relu.
Args:
in_c (int): number of input channels.
out_c (int): number of output channels.
k (int or tuple): kernel size.
s (int or tuple): stride.
p (int or tuple): padding.
"""
def __init__(self, in_c, out_c, k, s, p):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p)
self.bn = nn.BatchNorm2d(out_c)
def forward(self, x):
return F.relu(self.bn(self.conv(x)))
class ConvLayers(nn.Module):
"""Preprocessing layers."""
def __init__(self):
super(ConvLayers, self).__init__()
self.conv1 = ConvBlock(3, 48, k=3, s=1, p=1)
self.conv2 = ConvBlock(48, 96, k=3, s=1, p=1)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.maxpool(x)
return x
class MultiScaleA(nn.Module):
"""Multi-scale stream layer A (Sec.3.1)"""
def __init__(self):
super(MultiScaleA, self).__init__()
self.stream1 = nn.Sequential(
ConvBlock(96, 96, k=1, s=1, p=0),
ConvBlock(96, 24, k=3, s=1, p=1),
)
self.stream2 = nn.Sequential(
nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
ConvBlock(96, 24, k=1, s=1, p=0),
)
self.stream3 = ConvBlock(96, 24, k=1, s=1, p=0)
self.stream4 = nn.Sequential(
ConvBlock(96, 16, k=1, s=1, p=0),
ConvBlock(16, 24, k=3, s=1, p=1),
ConvBlock(24, 24, k=3, s=1, p=1),
)
def forward(self, x):
s1 = self.stream1(x)
s2 = self.stream2(x)
s3 = self.stream3(x)
s4 = self.stream4(x)
y = torch.cat([s1, s2, s3, s4], dim=1)
return y
class Reduction(nn.Module):
"""Reduction layer (Sec.3.1)"""
def __init__(self):
super(Reduction, self).__init__()
self.stream1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.stream2 = ConvBlock(96, 96, k=3, s=2, p=1)
self.stream3 = nn.Sequential(
ConvBlock(96, 48, k=1, s=1, p=0),
ConvBlock(48, 56, k=3, s=1, p=1),
ConvBlock(56, 64, k=3, s=2, p=1),
)
def forward(self, x):
s1 = self.stream1(x)
s2 = self.stream2(x)
s3 = self.stream3(x)
y = torch.cat([s1, s2, s3], dim=1)
return y
class MultiScaleB(nn.Module):
"""Multi-scale stream layer B (Sec.3.1)"""
def __init__(self):
super(MultiScaleB, self).__init__()
self.stream1 = nn.Sequential(
nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
ConvBlock(256, 256, k=1, s=1, p=0),
)
self.stream2 = nn.Sequential(
ConvBlock(256, 64, k=1, s=1, p=0),
ConvBlock(64, 128, k=(1, 3), s=1, p=(0, 1)),
ConvBlock(128, 256, k=(3, 1), s=1, p=(1, 0)),
)
self.stream3 = ConvBlock(256, 256, k=1, s=1, p=0)
self.stream4 = nn.Sequential(
ConvBlock(256, 64, k=1, s=1, p=0),
ConvBlock(64, 64, k=(1, 3), s=1, p=(0, 1)),
ConvBlock(64, 128, k=(3, 1), s=1, p=(1, 0)),
ConvBlock(128, 128, k=(1, 3), s=1, p=(0, 1)),
ConvBlock(128, 256, k=(3, 1), s=1, p=(1, 0)),
)
def forward(self, x):
s1 = self.stream1(x)
s2 = self.stream2(x)
s3 = self.stream3(x)
s4 = self.stream4(x)
return s1, s2, s3, s4
class Fusion(nn.Module):
"""Saliency-based learning fusion layer (Sec.3.2)"""
def __init__(self):
super(Fusion, self).__init__()
self.a1 = nn.Parameter(torch.rand(1, 256, 1, 1))
self.a2 = nn.Parameter(torch.rand(1, 256, 1, 1))
self.a3 = nn.Parameter(torch.rand(1, 256, 1, 1))
self.a4 = nn.Parameter(torch.rand(1, 256, 1, 1))
# We add an average pooling layer to reduce the spatial dimension
# of feature maps, which differs from the original paper.
self.avgpool = nn.AvgPool2d(kernel_size=4, stride=4, padding=0)
def forward(self, x1, x2, x3, x4):
s1 = self.a1.expand_as(x1) * x1
s2 = self.a2.expand_as(x2) * x2
s3 = self.a3.expand_as(x3) * x3
s4 = self.a4.expand_as(x4) * x4
y = self.avgpool(s1 + s2 + s3 + s4)
return y
class MuDeep(nn.Module):
"""Multiscale deep neural network.
Reference:
Qian et al. Multi-scale Deep Learning Architectures
for Person Re-identification. ICCV 2017.
Public keys:
- ``mudeep``: Multiscale deep neural network.
"""
def __init__(self, num_classes, loss='softmax', **kwargs):
super(MuDeep, self).__init__()
self.loss = loss
self.block1 = ConvLayers()
self.block2 = MultiScaleA()
self.block3 = Reduction()
self.block4 = MultiScaleB()
self.block5 = Fusion()
        # Because of this fully connected layer, the input image must have a
        # fixed shape, i.e. (3, 256, 128), so that the last convolutional
        # feature maps have shape (256, 16, 8). If the input shape changes,
        # the input dimension of this layer must be changed accordingly.
self.fc = nn.Sequential(
nn.Linear(256 * 16 * 8, 4096),
nn.BatchNorm1d(4096),
nn.ReLU(),
)
self.classifier = nn.Linear(4096, num_classes)
self.feat_dim = 4096
def featuremaps(self, x):
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(*x)
return x
    def forward(self, x):
        x = self.featuremaps(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        if not self.training:
            return x
        y = self.classifier(x)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, x
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

File diff suppressed because it is too large.


@@ -0,0 +1,598 @@
from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0':
'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
'osnet_x0_75':
'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
'osnet_x0_5':
'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
'osnet_x0_25':
'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
'osnet_ibn_x1_0':
'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
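        # 1x1 pointwise conv (linear) followed by a depthwise 3x3 conv
        # (groups=out_channels), i.e. a factorized 3x3 convolution.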
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
x = self.relu(x)
return x
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU(inplace=True)
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError(
"Unknown gate activation: {}".format(gate_activation)
)
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
return input * x
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(
self,
in_channels,
out_channels,
IN=False,
bottleneck_reduction=4,
**kwargs
):
super(OSBlock, self).__init__()
mid_channels = out_channels // bottleneck_reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2a = LightConv3x3(mid_channels, mid_channels)
self.conv2b = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2c = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2d = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = None
if IN:
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2a = self.conv2a(x1)
x2b = self.conv2b(x1)
x2c = self.conv2c(x1)
x2d = self.conv2d(x1)
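        # Unified aggregation gate: each multi-scale stream is reweighted by
        # the shared ChannelGate before the streams are summed.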
x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d)
x3 = self.conv3(x2)
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
if self.IN is not None:
out = self.IN(out)
return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. TPAMI, 2021.
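    Example (a minimal sketch using the ``osnet_x1_0`` factory defined below;
    ``pretrained=False`` skips the gdown download)::
        >>> import torch
        >>> net = osnet_x1_0(num_classes=751, pretrained=False).eval()
        >>> v = net(torch.rand(2, 3, 256, 128))  # (2, 512) feature vectors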
"""
def __init__(
self,
num_classes,
blocks,
layers,
channels,
feature_dim=512,
loss='softmax',
IN=False,
**kwargs
):
super(OSNet, self).__init__()
num_blocks = len(blocks)
assert num_blocks == len(layers)
assert num_blocks == len(channels) - 1
self.loss = loss
self.feature_dim = feature_dim
# convolutional backbone
self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._make_layer(
blocks[0],
layers[0],
channels[0],
channels[1],
reduce_spatial_size=True,
IN=IN
)
self.conv3 = self._make_layer(
blocks[1],
layers[1],
channels[1],
channels[2],
reduce_spatial_size=True
)
self.conv4 = self._make_layer(
blocks[2],
layers[2],
channels[2],
channels[3],
reduce_spatial_size=False
)
self.conv5 = Conv1x1(channels[3], channels[3])
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
# fully connected layer
self.fc = self._construct_fc_layer(
self.feature_dim, channels[3], dropout_p=None
)
# identity classification layer
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _make_layer(
self,
block,
layer,
in_channels,
out_channels,
reduce_spatial_size,
IN=False
):
layers = []
layers.append(block(in_channels, out_channels, IN=IN))
for i in range(1, layer):
layers.append(block(out_channels, out_channels, IN=IN))
if reduce_spatial_size:
layers.append(
nn.Sequential(
Conv1x1(out_channels, out_channels),
nn.AvgPool2d(2, stride=2)
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
if fc_dims is None or fc_dims < 0:
self.feature_dim = input_dim
return None
if isinstance(fc_dims, int):
fc_dims = [fc_dims]
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
return x
def forward(self, x, return_featuremaps=False):
x = self.featuremaps(x)
if return_featuremaps:
return x
v = self.global_avgpool(x)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
"""Initializes model with pretrained weights.
    Layers whose names or sizes do not match the pretrained weights are kept unchanged.
"""
import os
import errno
import gdown
from collections import OrderedDict
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights from "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(cached_file)
)
else:
print(
'Successfully loaded imagenet pretrained weights from "{}"'.
format(cached_file)
)
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
##########
# Instantiation
##########
def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# medium size (width x0.75)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[48, 192, 288, 384],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_75')
return model
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# tiny size (width x0.5)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[32, 128, 192, 256],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_5')
return model
def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# very tiny size (width x0.25)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[16, 64, 96, 128],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_25')
return model
def osnet_ibn_x1_0(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
# standard size (width x1.0) + IBN layer
# Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018.
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ibn_x1_0')
return model


@@ -0,0 +1,609 @@
from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
__all__ = [
'osnet_ain_x1_0', 'osnet_ain_x0_75', 'osnet_ain_x0_5', 'osnet_ain_x0_25'
]
pretrained_urls = {
'osnet_ain_x1_0':
'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo',
'osnet_ain_x0_75':
'https://drive.google.com/uc?id=1apy0hpsMypqstfencdH-jKIUEFOW4xoM',
'osnet_ain_x0_5':
'https://drive.google.com/uc?id=1KusKvEYyKGDTUBVRxRiz55G31wkihB6l',
'osnet_ain_x0_25':
'https://drive.google.com/uc?id=1SxQt2AvmEcgWNhaRb2xC4rP6ZwVDP0Wt'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1, bn=True):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = None
if bn:
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
return self.relu(x)
class LightConvStream(nn.Module):
"""Lightweight convolution stream."""
def __init__(self, in_channels, out_channels, depth):
super(LightConvStream, self).__init__()
assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format(
depth
)
layers = []
layers += [LightConv3x3(in_channels, out_channels)]
for i in range(depth - 1):
layers += [LightConv3x3(out_channels, out_channels)]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU()
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU()
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError(
"Unknown gate activation: {}".format(gate_activation)
)
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
return input * x
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
super(OSBlock, self).__init__()
assert T >= 1
assert out_channels >= reduction and out_channels % reduction == 0
mid_channels = out_channels // reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2 = nn.ModuleList()
for t in range(1, T + 1):
self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2 = 0
for conv2_t in self.conv2:
x2_t = conv2_t(x1)
x2 = x2 + self.gate(x2_t)
x3 = self.conv3(x2)
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
return F.relu(out)
class OSBlockINin(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs):
super(OSBlockINin, self).__init__()
assert T >= 1
assert out_channels >= reduction and out_channels % reduction == 0
mid_channels = out_channels // reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2 = nn.ModuleList()
for t in range(1, T + 1):
self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2 = 0
for conv2_t in self.conv2:
x2_t = conv2_t(x1)
x2 = x2 + self.gate(x2_t)
x3 = self.conv3(x2)
x3 = self.IN(x3) # IN inside residual
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. TPAMI, 2021.
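    Example (a minimal sketch using the ``osnet_ain_x1_0`` factory defined
    below; ``pretrained=False`` skips the gdown download)::
        >>> import torch
        >>> net = osnet_ain_x1_0(num_classes=751, pretrained=False).eval()
        >>> v = net(torch.rand(2, 3, 256, 128))  # (2, 512) feature vectors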
"""
def __init__(
self,
num_classes,
blocks,
layers,
channels,
feature_dim=512,
loss='softmax',
conv1_IN=False,
**kwargs
):
super(OSNet, self).__init__()
num_blocks = len(blocks)
assert num_blocks == len(layers)
assert num_blocks == len(channels) - 1
self.loss = loss
self.feature_dim = feature_dim
# convolutional backbone
self.conv1 = ConvLayer(
3, channels[0], 7, stride=2, padding=3, IN=conv1_IN
)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._make_layer(
blocks[0], layers[0], channels[0], channels[1]
)
self.pool2 = nn.Sequential(
Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2)
)
self.conv3 = self._make_layer(
blocks[1], layers[1], channels[1], channels[2]
)
self.pool3 = nn.Sequential(
Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2)
)
self.conv4 = self._make_layer(
blocks[2], layers[2], channels[2], channels[3]
)
self.conv5 = Conv1x1(channels[3], channels[3])
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
# fully connected layer
self.fc = self._construct_fc_layer(
self.feature_dim, channels[3], dropout_p=None
)
# identity classification layer
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _make_layer(self, blocks, layer, in_channels, out_channels):
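        # NOTE: each entry in ``blocks`` is itself a list of block classes, so
        # the stage depth comes from ``len(blocks)``; ``layer`` is effectively
        # unused here.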
layers = []
layers += [blocks[0](in_channels, out_channels)]
for i in range(1, len(blocks)):
layers += [blocks[i](out_channels, out_channels)]
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
if fc_dims is None or fc_dims < 0:
self.feature_dim = input_dim
return None
if isinstance(fc_dims, int):
fc_dims = [fc_dims]
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU())
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.InstanceNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.conv5(x)
return x
def forward(self, x, return_featuremaps=False):
x = self.featuremaps(x)
if return_featuremaps:
return x
v = self.global_avgpool(x)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
"""Initializes model with pretrained weights.
    Layers whose names or sizes do not match the pretrained weights are kept unchanged.
"""
import os
import errno
import gdown
from collections import OrderedDict
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights from "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(cached_file)
)
else:
print(
'Successfully loaded imagenet pretrained weights from "{}"'.
format(cached_file)
)
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
##########
# Instantiation
##########
def osnet_ain_x1_0(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
conv1_IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ain_x1_0')
return model
def osnet_ain_x0_75(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
layers=[2, 2, 2],
channels=[48, 192, 288, 384],
loss=loss,
conv1_IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ain_x0_75')
return model
def osnet_ain_x0_5(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
layers=[2, 2, 2],
channels=[32, 128, 192, 256],
loss=loss,
conv1_IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ain_x0_5')
return model
def osnet_ain_x0_25(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
layers=[2, 2, 2],
channels=[16, 64, 96, 128],
loss=loss,
conv1_IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ain_x0_25')
return model


@@ -0,0 +1,314 @@
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import functional as F
__all__ = ['pcb_p6', 'pcb_p4']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class DimReduceLayer(nn.Module):
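    """1x1 conv + BN (+ optional nonlinearity) for reducing part-feature dimensionality."""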
def __init__(self, in_channels, out_channels, nonlinear):
super(DimReduceLayer, self).__init__()
layers = []
layers.append(
nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
)
layers.append(nn.BatchNorm2d(out_channels))
if nonlinear == 'relu':
layers.append(nn.ReLU(inplace=True))
elif nonlinear == 'leakyrelu':
layers.append(nn.LeakyReLU(0.1))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class PCB(nn.Module):
"""Part-based Convolutional Baseline.
Reference:
Sun et al. Beyond Part Models: Person Retrieval with Refined
Part Pooling (and A Strong Convolutional Baseline). ECCV 2018.
Public keys:
- ``pcb_p4``: PCB with 4-part strips.
- ``pcb_p6``: PCB with 6-part strips.
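    Example (a minimal sketch; training mode returns one logits tensor per
    part, eval mode a single flattened, L2-normalized feature vector)::
        >>> import torch
        >>> net = pcb_p6(num_classes=751, pretrained=False).eval()
        >>> v = net(torch.rand(2, 3, 256, 128))  # (2, 2048 * 6) part features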
"""
def __init__(
self,
num_classes,
loss,
block,
layers,
parts=6,
reduced_dim=256,
nonlinear='relu',
**kwargs
):
self.inplanes = 64
super(PCB, self).__init__()
self.loss = loss
self.parts = parts
self.feature_dim = 512 * block.expansion
# backbone network
self.conv1 = nn.Conv2d(
3, 64, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
# pcb layers
self.parts_avgpool = nn.AdaptiveAvgPool2d((self.parts, 1))
self.dropout = nn.Dropout(p=0.5)
self.conv5 = DimReduceLayer(
512 * block.expansion, reduced_dim, nonlinear=nonlinear
)
self.feature_dim = reduced_dim
self.classifier = nn.ModuleList(
[
nn.Linear(self.feature_dim, num_classes)
for _ in range(self.parts)
]
)
self._init_params()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
f = self.featuremaps(x)
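        # Pool the final feature map into ``self.parts`` horizontal strips.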
v_g = self.parts_avgpool(f)
if not self.training:
v_g = F.normalize(v_g, p=2, dim=1)
return v_g.view(v_g.size(0), -1)
v_g = self.dropout(v_g)
v_h = self.conv5(v_g)
y = []
for i in range(self.parts):
v_h_i = v_h[:, :, i, :]
v_h_i = v_h_i.view(v_h_i.size(0), -1)
y_i = self.classifier[i](v_h_i)
y.append(y_i)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
v_g = F.normalize(v_g, p=2, dim=1)
return y, v_g.view(v_g.size(0), -1)
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
    Layers whose names or sizes do not match the pretrained weights are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def pcb_p6(num_classes, loss='softmax', pretrained=True, **kwargs):
model = PCB(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
parts=6,
reduced_dim=256,
nonlinear='relu',
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def pcb_p4(num_classes, loss='softmax', pretrained=True, **kwargs):
model = PCB(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
parts=4,
reduced_dim=256,
nonlinear='relu',
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model


@@ -0,0 +1,530 @@
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512'
]
model_urls = {
'resnet18':
'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34':
'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50':
'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101':
'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152':
'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d':
'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d':
'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError(
'BasicBlock only supports groups=1 and base_width=64'
)
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock"
)
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width/64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
"""Residual network.
Reference:
- He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
- Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.
Public keys:
- ``resnet18``: ResNet18.
- ``resnet34``: ResNet34.
- ``resnet50``: ResNet50.
- ``resnet101``: ResNet101.
- ``resnet152``: ResNet152.
- ``resnext50_32x4d``: ResNeXt50.
- ``resnext101_32x8d``: ResNeXt101.
- ``resnet50_fc512``: ResNet50 + FC.
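    Example (a minimal sketch using the ``resnet50`` factory defined below;
    ``pretrained=False`` skips the download)::
        >>> import torch
        >>> net = resnet50(num_classes=751, pretrained=False).eval()
        >>> v = net(torch.rand(2, 3, 256, 128))  # (2, 2048) feature vectors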
"""
def __init__(
self,
num_classes,
loss,
block,
layers,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.loss = loss
self.feature_dim = 512 * block.expansion
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".
format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block,
512,
layers[3],
stride=last_stride,
dilate=replace_stride_with_dilation[2]
)
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = self._construct_fc_layer(
fc_dims, 512 * block.expansion, dropout_p
)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
    Layers whose names or sizes do not match the pretrained weights are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
"""ResNet"""
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[2, 2, 2, 2],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet18'])
return model
def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet34'])
return model
def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet101'])
return model
def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 8, 36, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet152'])
return model
"""ResNeXt"""
def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=4,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext50_32x4d'])
return model
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=8,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext101_32x8d'])
return model
"""
ResNet + FC
"""
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model


@@ -0,0 +1,289 @@
"""
Credit to https://github.com/XingangPan/IBN-Net.
"""
from __future__ import division, absolute_import
import math
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['resnet50_ibn_a']
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class IBN(nn.Module):
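    """Instance-Batch Normalization (IBN-a) layer.

    Applies InstanceNorm2d to the first half of the channels and BatchNorm2d
    to the second half, then concatenates the two halves (Pan et al., ECCV 2018).
    """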
def __init__(self, planes):
super(IBN, self).__init__()
half1 = int(planes / 2)
self.half = half1
half2 = planes - half1
self.IN = nn.InstanceNorm2d(half1, affine=True)
self.BN = nn.BatchNorm2d(half2)
def forward(self, x):
split = torch.split(x, self.half, 1)
out1 = self.IN(split[0].contiguous())
out2 = self.BN(split[1].contiguous())
out = torch.cat((out1, out2), 1)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, ibn=False, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
if ibn:
self.bn1 = IBN(planes)
else:
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""Residual network + IBN layer.
Reference:
- He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
- Pan et al. Two at Once: Enhancing Learning and Generalization
Capacities via IBN-Net. ECCV 2018.
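    Example (a minimal sketch; ``pretrained`` defaults to False for this model)::
        >>> import torch
        >>> net = resnet50_ibn_a(num_classes=751).eval()
        >>> v = net(torch.rand(2, 3, 256, 128))  # (2, 2048) feature vectors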
"""
def __init__(
self,
block,
layers,
num_classes=1000,
loss='softmax',
fc_dims=None,
dropout_p=None,
**kwargs
):
scale = 64
self.inplanes = scale
super(ResNet, self).__init__()
self.loss = loss
self.feature_dim = scale * 8 * block.expansion
self.conv1 = nn.Conv2d(
3, scale, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = nn.BatchNorm2d(scale)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, scale, layers[0])
self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2)
self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2)
self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = self._construct_fc_layer(
fc_dims, scale * 8 * block.expansion, dropout_p
)
self.classifier = nn.Linear(self.feature_dim, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.InstanceNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
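        # Following IBN-Net(a), IBN replaces the first BN in every block
        # except in the last stage (planes == 512), which keeps plain BN.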
ibn = True
if planes == 512:
ibn = False
layers.append(block(self.inplanes, planes, ibn, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, ibn))
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def featuremaps(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
    Layers whose names or sizes do not match the pretrained weights are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def resnet50_ibn_a(num_classes, loss='softmax', pretrained=False, **kwargs):
model = ResNet(
Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model


@@ -0,0 +1,274 @@
"""
Credit to https://github.com/XingangPan/IBN-Net.
"""
from __future__ import division, absolute_import
import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['resnet50_ibn_b']
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, IN=False):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.IN = None
if IN:
            self.IN = nn.InstanceNorm2d(planes * self.expansion, affine=True)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
if self.IN is not None:
out = self.IN(out)
out = self.relu(out)
return out
class ResNet(nn.Module):
"""Residual network + IBN layer.
Reference:
- He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
- Pan et al. Two at Once: Enhancing Learning and Generalization
Capacities via IBN-Net. ECCV 2018.
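    Example (a minimal sketch; ``pretrained`` defaults to False for this model)::
        >>> import torch
        >>> net = resnet50_ibn_b(num_classes=751).eval()
        >>> v = net(torch.rand(2, 3, 256, 128))  # (2, 2048) feature vectors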
"""
def __init__(
self,
block,
layers,
num_classes=1000,
loss='softmax',
fc_dims=None,
dropout_p=None,
**kwargs
):
scale = 64
self.inplanes = scale
super(ResNet, self).__init__()
self.loss = loss
self.feature_dim = scale * 8 * block.expansion
self.conv1 = nn.Conv2d(
3, scale, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = nn.InstanceNorm2d(scale, affine=True)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(
block, scale, layers[0], stride=1, IN=True
)
self.layer2 = self._make_layer(
block, scale * 2, layers[1], stride=2, IN=True
)
self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2)
self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = self._construct_fc_layer(
fc_dims, scale * 8 * block.expansion, dropout_p
)
self.classifier = nn.Linear(self.feature_dim, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.InstanceNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, IN=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
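        # Following IBN-Net(b), instance normalization (when requested via
        # ``IN``) is applied only after the last block of the stage.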
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks - 1):
layers.append(block(self.inplanes, planes))
layers.append(block(self.inplanes, planes, IN=IN))
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def featuremaps(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def resnet50_ibn_b(num_classes, loss='softmax', pretrained=False, **kwargs):
model = ResNet(
Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
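# Usage sketch (hypothetical, in the doctest style used elsewhere in this package).
# IBN-b swaps the stem BatchNorm for InstanceNorm and appends IN after the last
# block of layer1/layer2, so `model.bn1` is actually an nn.InstanceNorm2d; with
# fc_dims=None the test-time embedding is 2048-dim:
#   >>> model = resnet50_ibn_b(num_classes=751, pretrained=False)
#   >>> isinstance(model.bn1, nn.InstanceNorm2d)
#   True
#   >>> model.eval()(torch.randn(1, 3, 256, 128)).shape
#   torch.Size([1, 2048])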

View file

@ -0,0 +1,307 @@
from __future__ import division, absolute_import
import torch
import torch.utils.model_zoo as model_zoo
from torch import nn
__all__ = ['resnet50mid']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNetMid(nn.Module):
"""Residual network + mid-level features.
Reference:
Yu et al. The Devil is in the Middle: Exploiting Mid-level Representations for
Cross-Domain Instance Matching. arXiv:1711.08106.
Public keys:
- ``resnet50mid``: ResNet50 + mid-level feature fusion.
"""
def __init__(
self,
num_classes,
loss,
block,
layers,
last_stride=2,
fc_dims=None,
**kwargs
):
self.inplanes = 64
super(ResNetMid, self).__init__()
self.loss = loss
self.feature_dim = 512 * block.expansion
# backbone network
self.conv1 = nn.Conv2d(
3, 64, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride
)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
assert fc_dims is not None
self.fc_fusion = self._construct_fc_layer(
fc_dims, 512 * block.expansion * 2
)
self.feature_dim += 512 * block.expansion
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x4a = self.layer4[0](x)
x4b = self.layer4[1](x4a)
x4c = self.layer4[2](x4b)
return x4a, x4b, x4c
def forward(self, x):
x4a, x4b, x4c = self.featuremaps(x)
v4a = self.global_avgpool(x4a)
v4b = self.global_avgpool(x4b)
v4c = self.global_avgpool(x4c)
v4ab = torch.cat([v4a, v4b], 1)
v4ab = v4ab.view(v4ab.size(0), -1)
v4ab = self.fc_fusion(v4ab)
v4c = v4c.view(v4c.size(0), -1)
v = torch.cat([v4ab, v4c], 1)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
"""
Residual network configurations:
--
resnet18: block=BasicBlock, layers=[2, 2, 2, 2]
resnet34: block=BasicBlock, layers=[3, 4, 6, 3]
resnet50: block=Bottleneck, layers=[3, 4, 6, 3]
resnet101: block=Bottleneck, layers=[3, 4, 23, 3]
resnet152: block=Bottleneck, layers=[3, 8, 36, 3]
"""
def resnet50mid(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNetMid(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=[1024],
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
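# Usage sketch (hypothetical): resnet50mid fuses mid-level features from the three
# layer4 blocks; with fc_dims=[1024] the test-time embedding is
# cat([fc_fusion(v4a, v4b) -> 1024, v4c -> 2048]), i.e. 3072-dim:
#   >>> model = resnet50mid(num_classes=751, pretrained=False)
#   >>> model.feature_dim
#   3072
#   >>> model.eval()(torch.randn(2, 3, 256, 128)).shape
#   torch.Size([2, 3072])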

View file

@ -0,0 +1,688 @@
from __future__ import division, absolute_import
import math
from collections import OrderedDict
import torch.nn as nn
from torch.utils import model_zoo
__all__ = [
'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152',
'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnet50_fc512'
]
"""
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""
pretrained_settings = {
'senet154': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnet50': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnet101': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnet152': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnext50_32x4d': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnext101_32x4d': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
}
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
channels, channels // reduction, kernel_size=1, padding=0
)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(
channels // reduction, channels, kernel_size=1, padding=0
)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
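# Shape sketch for SEModule (assumed values, not part of the original file):
# global-average-pool to (B, C, 1, 1), squeeze C -> C//reduction, excite back to C,
# sigmoid-gate, then rescale the input channel-wise via broadcasting:
#   >>> se = SEModule(channels=64, reduction=16)
#   >>> se(torch.randn(2, 64, 32, 16)).shape   # unchanged shape, rescaled channels
#   torch.Size([2, 64, 32, 16])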
class Bottleneck(nn.Module):
"""
Base class for bottlenecks that implements `forward()` method.
"""
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.se_module(out) + residual
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
expansion = 4
def __init__(
self, inplanes, planes, groups, reduction, stride=1, downsample=None
):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes * 2)
self.conv2 = nn.Conv2d(
planes * 2,
planes * 4,
kernel_size=3,
stride=stride,
padding=1,
groups=groups,
bias=False
)
self.bn2 = nn.BatchNorm2d(planes * 4)
self.conv3 = nn.Conv2d(
planes * 4, planes * 4, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBottleneck(Bottleneck):
"""
ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
implementation and uses `stride=stride` in `conv1` and not in `conv2`
(the latter is used in the torchvision implementation of ResNet).
"""
expansion = 4
def __init__(
self, inplanes, planes, groups, reduction, stride=1, downsample=None
):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, bias=False, stride=stride
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
padding=1,
groups=groups,
bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
"""ResNeXt bottleneck type C with a Squeeze-and-Excitation module"""
expansion = 4
def __init__(
self,
inplanes,
planes,
groups,
reduction,
stride=1,
downsample=None,
base_width=4
):
super(SEResNeXtBottleneck, self).__init__()
width = int(math.floor(planes * (base_width/64.)) * groups)
self.conv1 = nn.Conv2d(
inplanes, width, kernel_size=1, bias=False, stride=1
)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(
width,
width,
kernel_size=3,
stride=stride,
padding=1,
groups=groups,
bias=False
)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SENet(nn.Module):
"""Squeeze-and-excitation network.
Reference:
Hu et al. Squeeze-and-Excitation Networks. CVPR 2018.
Public keys:
- ``senet154``: SENet154.
- ``se_resnet50``: ResNet50 + SE.
- ``se_resnet101``: ResNet101 + SE.
- ``se_resnet152``: ResNet152 + SE.
- ``se_resnext50_32x4d``: ResNeXt50 (groups=32, width=4) + SE.
- ``se_resnext101_32x4d``: ResNeXt101 (groups=32, width=4) + SE.
- ``se_resnet50_fc512``: (ResNet50 + SE) + FC.
"""
def __init__(
self,
num_classes,
loss,
block,
layers,
groups,
reduction,
dropout_p=0.2,
inplanes=128,
input_3x3=True,
downsample_kernel_size=3,
downsample_padding=1,
last_stride=2,
fc_dims=None,
**kwargs
):
"""
Parameters
----------
block (nn.Module): Bottleneck class.
- For SENet154: SEBottleneck
- For SE-ResNet models: SEResNetBottleneck
- For SE-ResNeXt models: SEResNeXtBottleneck
layers (list of ints): Number of residual blocks for 4 layers of the
network (layer1...layer4).
groups (int): Number of groups for the 3x3 convolution in each
bottleneck block.
- For SENet154: 64
- For SE-ResNet models: 1
- For SE-ResNeXt models: 32
reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
- For all models: 16
dropout_p (float or None): Drop probability for the Dropout layer.
If `None` the Dropout layer is not used.
- For SENet154: 0.2
- For SE-ResNet models: None
- For SE-ResNeXt models: None
inplanes (int): Number of input channels for layer1.
- For SENet154: 128
- For SE-ResNet models: 64
- For SE-ResNeXt models: 64
input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
a single 7x7 convolution in layer0.
- For SENet154: True
- For SE-ResNet models: False
- For SE-ResNeXt models: False
downsample_kernel_size (int): Kernel size for downsampling convolutions
in layer2, layer3 and layer4.
- For SENet154: 3
- For SE-ResNet models: 1
- For SE-ResNeXt models: 1
downsample_padding (int): Padding for downsampling convolutions in
layer2, layer3 and layer4.
- For SENet154: 1
- For SE-ResNet models: 0
- For SE-ResNeXt models: 0
num_classes (int): Number of outputs in `classifier` layer.
"""
super(SENet, self).__init__()
self.inplanes = inplanes
self.loss = loss
if input_3x3:
layer0_modules = [
(
'conv1',
nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)
),
('bn1', nn.BatchNorm2d(64)),
('relu1', nn.ReLU(inplace=True)),
(
'conv2',
nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)
),
('bn2', nn.BatchNorm2d(64)),
('relu2', nn.ReLU(inplace=True)),
(
'conv3',
nn.Conv2d(
64, inplanes, 3, stride=1, padding=1, bias=False
)
),
('bn3', nn.BatchNorm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
(
'conv1',
nn.Conv2d(
3,
inplanes,
kernel_size=7,
stride=2,
padding=3,
bias=False
)
),
('bn1', nn.BatchNorm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
# To preserve compatibility with Caffe weights `ceil_mode=True`
# is used instead of `padding=1`.
layer0_modules.append(
('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True))
)
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
self.layer1 = self._make_layer(
block,
planes=64,
blocks=layers[0],
groups=groups,
reduction=reduction,
downsample_kernel_size=1,
downsample_padding=0
)
self.layer2 = self._make_layer(
block,
planes=128,
blocks=layers[1],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer3 = self._make_layer(
block,
planes=256,
blocks=layers[2],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer4 = self._make_layer(
block,
planes=512,
blocks=layers[3],
stride=last_stride,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = self._construct_fc_layer(
fc_dims, 512 * block.expansion, dropout_p
)
self.classifier = nn.Linear(self.feature_dim, num_classes)
def _make_layer(
self,
block,
planes,
blocks,
groups,
reduction,
stride=1,
downsample_kernel_size=1,
downsample_padding=0
):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=downsample_kernel_size,
stride=stride,
padding=downsample_padding,
bias=False
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes, planes, groups, reduction, stride, downsample
)
)
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups, reduction))
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""
Construct fully connected layer
- fc_dims (list or tuple): dimensions of fc layers, if None,
no fc layers are constructed
- input_dim (int): input dimension
- dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def featuremaps(self, x):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def senet154(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SENet(
num_classes=num_classes,
loss=loss,
block=SEBottleneck,
layers=[3, 8, 36, 3],
groups=64,
reduction=16,
dropout_p=0.2,
last_stride=2,
fc_dims=None,
**kwargs
)
if pretrained:
model_url = pretrained_settings['senet154']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
def se_resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SENet(
num_classes=num_classes,
loss=loss,
block=SEResNetBottleneck,
layers=[3, 4, 6, 3],
groups=1,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=2,
fc_dims=None,
**kwargs
)
if pretrained:
model_url = pretrained_settings['se_resnet50']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
def se_resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SENet(
num_classes=num_classes,
loss=loss,
block=SEResNetBottleneck,
layers=[3, 4, 6, 3],
groups=1,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=1,
fc_dims=[512],
**kwargs
)
if pretrained:
model_url = pretrained_settings['se_resnet50']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
def se_resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SENet(
num_classes=num_classes,
loss=loss,
block=SEResNetBottleneck,
layers=[3, 4, 23, 3],
groups=1,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=2,
fc_dims=None,
**kwargs
)
if pretrained:
model_url = pretrained_settings['se_resnet101']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
def se_resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SENet(
num_classes=num_classes,
loss=loss,
block=SEResNetBottleneck,
layers=[3, 8, 36, 3],
groups=1,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=2,
fc_dims=None,
**kwargs
)
if pretrained:
model_url = pretrained_settings['se_resnet152']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
def se_resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SENet(
num_classes=num_classes,
loss=loss,
block=SEResNeXtBottleneck,
layers=[3, 4, 6, 3],
groups=32,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=2,
fc_dims=None,
**kwargs
)
if pretrained:
        model_url = pretrained_settings['se_resnext50_32x4d']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
def se_resnext101_32x4d(
num_classes, loss='softmax', pretrained=True, **kwargs
):
model = SENet(
num_classes=num_classes,
loss=loss,
block=SEResNeXtBottleneck,
layers=[3, 4, 23, 3],
groups=32,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=2,
fc_dims=None,
**kwargs
)
if pretrained:
        model_url = pretrained_settings['se_resnext101_32x4d']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
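# Usage sketch (hypothetical): every builder above is a thin preset over the SENet
# constructor, differing only in block type, depth, groups and downsample settings:
#   >>> model = se_resnext50_32x4d(num_classes=751, loss='triplet', pretrained=False)
#   >>> y, v = model.train()(torch.randn(8, 3, 256, 128))  # logits and 2048-dim features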

View file

@ -0,0 +1,198 @@
from __future__ import division, absolute_import
import torch
import torch.utils.model_zoo as model_zoo
from torch import nn
from torch.nn import functional as F
__all__ = ['shufflenet']
model_urls = {
# training epoch = 90, top1 = 61.8
'imagenet':
'https://mega.nz/#!RDpUlQCY!tr_5xBEkelzDjveIYBBcGcovNCOrgfiJO9kiidz9fZM',
}
class ChannelShuffle(nn.Module):
def __init__(self, num_groups):
super(ChannelShuffle, self).__init__()
self.g = num_groups
def forward(self, x):
b, c, h, w = x.size()
n = c // self.g
# reshape
x = x.view(b, self.g, n, h, w)
# transpose
x = x.permute(0, 2, 1, 3, 4).contiguous()
# flatten
x = x.view(b, c, h, w)
return x
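# Numeric sketch (not part of the original file): with num_groups=2 and 4 channels,
# the reshape/transpose/flatten above interleaves the two groups,
# i.e. channel order [0, 1, 2, 3] -> [0, 2, 1, 3]:
#   >>> x = torch.arange(4.).view(1, 4, 1, 1)
#   >>> ChannelShuffle(num_groups=2)(x).flatten().tolist()
#   [0.0, 2.0, 1.0, 3.0]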
class Bottleneck(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride,
num_groups,
group_conv1x1=True
):
super(Bottleneck, self).__init__()
assert stride in [1, 2], 'Warning: stride must be either 1 or 2'
self.stride = stride
mid_channels = out_channels // 4
if stride == 2:
out_channels -= in_channels
# group conv is not applied to first conv1x1 at stage 2
num_groups_conv1x1 = num_groups if group_conv1x1 else 1
self.conv1 = nn.Conv2d(
in_channels,
mid_channels,
1,
groups=num_groups_conv1x1,
bias=False
)
self.bn1 = nn.BatchNorm2d(mid_channels)
self.shuffle1 = ChannelShuffle(num_groups)
self.conv2 = nn.Conv2d(
mid_channels,
mid_channels,
3,
stride=stride,
padding=1,
groups=mid_channels,
bias=False
)
self.bn2 = nn.BatchNorm2d(mid_channels)
self.conv3 = nn.Conv2d(
mid_channels, out_channels, 1, groups=num_groups, bias=False
)
self.bn3 = nn.BatchNorm2d(out_channels)
if stride == 2:
self.shortcut = nn.AvgPool2d(3, stride=2, padding=1)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.shuffle1(out)
out = self.bn2(self.conv2(out))
out = self.bn3(self.conv3(out))
if self.stride == 2:
res = self.shortcut(x)
out = F.relu(torch.cat([res, out], 1))
else:
out = F.relu(x + out)
return out
# configuration of (num_groups: #out_channels) based on Table 1 in the paper
cfg = {
1: [144, 288, 576],
2: [200, 400, 800],
3: [240, 480, 960],
4: [272, 544, 1088],
8: [384, 768, 1536],
}
class ShuffleNet(nn.Module):
"""ShuffleNet.
Reference:
Zhang et al. ShuffleNet: An Extremely Efficient Convolutional Neural
Network for Mobile Devices. CVPR 2018.
Public keys:
- ``shufflenet``: ShuffleNet (groups=3).
"""
def __init__(self, num_classes, loss='softmax', num_groups=3, **kwargs):
super(ShuffleNet, self).__init__()
self.loss = loss
self.conv1 = nn.Sequential(
nn.Conv2d(3, 24, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(24),
nn.ReLU(),
nn.MaxPool2d(3, stride=2, padding=1),
)
self.stage2 = nn.Sequential(
Bottleneck(
24, cfg[num_groups][0], 2, num_groups, group_conv1x1=False
),
Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups),
Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups),
Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups),
)
self.stage3 = nn.Sequential(
Bottleneck(cfg[num_groups][0], cfg[num_groups][1], 2, num_groups),
Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups),
)
self.stage4 = nn.Sequential(
Bottleneck(cfg[num_groups][1], cfg[num_groups][2], 2, num_groups),
Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups),
Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups),
Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups),
)
self.classifier = nn.Linear(cfg[num_groups][2], num_classes)
self.feat_dim = cfg[num_groups][2]
def forward(self, x):
x = self.conv1(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), -1)
if not self.training:
return x
y = self.classifier(x)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, x
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def shufflenet(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ShuffleNet(num_classes, loss, **kwargs)
if pretrained:
# init_pretrained_weights(model, model_urls['imagenet'])
import warnings
warnings.warn(
'The imagenet pretrained weights need to be manually downloaded from {}'
.format(model_urls['imagenet'])
)
return model
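# Usage sketch (hypothetical): the ImageNet checkpoint above is hosted on mega.nz,
# which model_zoo cannot fetch, hence the warning; building without it still works:
#   >>> model = shufflenet(num_classes=751, pretrained=False)
#   >>> model.eval()(torch.randn(1, 3, 256, 128)).shape  # feat_dim = cfg[3][2] = 960
#   torch.Size([1, 960])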

View file

@ -0,0 +1,262 @@
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch
import torch.utils.model_zoo as model_zoo
from torch import nn
__all__ = [
'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5',
'shufflenet_v2_x2_0'
]
model_urls = {
'shufflenetv2_x0.5':
'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth',
'shufflenetv2_x1.0':
'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth',
'shufflenetv2_x1.5': None,
'shufflenetv2_x2.0': None,
}
def channel_shuffle(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride):
super(InvertedResidual, self).__init__()
if not (1 <= stride <= 3):
raise ValueError('illegal stride value')
self.stride = stride
branch_features = oup // 2
assert (self.stride != 1) or (inp == branch_features << 1)
if self.stride > 1:
self.branch1 = nn.Sequential(
self.depthwise_conv(
inp, inp, kernel_size=3, stride=self.stride, padding=1
),
nn.BatchNorm2d(inp),
nn.Conv2d(
inp,
branch_features,
kernel_size=1,
stride=1,
padding=0,
bias=False
),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
self.branch2 = nn.Sequential(
nn.Conv2d(
inp if (self.stride > 1) else branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
bias=False
),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
self.depthwise_conv(
branch_features,
branch_features,
kernel_size=3,
stride=self.stride,
padding=1
),
nn.BatchNorm2d(branch_features),
nn.Conv2d(
branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
bias=False
),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
@staticmethod
def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
return nn.Conv2d(
i, o, kernel_size, stride, padding, bias=bias, groups=i
)
def forward(self, x):
if self.stride == 1:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
else:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
out = channel_shuffle(out, 2)
return out
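# Channel bookkeeping sketch (assumed values, not part of the original file): at
# stride 1 the input is chunked in half and only one half is transformed, which is
# why the assert above requires inp == 2 * branch_features; at stride 2 both
# branches consume the full input and the spatial size halves:
#   >>> InvertedResidual(116, 116, stride=1)(torch.randn(1, 116, 32, 16)).shape
#   torch.Size([1, 116, 32, 16])
#   >>> InvertedResidual(24, 116, stride=2)(torch.randn(1, 24, 64, 32)).shape
#   torch.Size([1, 116, 32, 16])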
class ShuffleNetV2(nn.Module):
"""ShuffleNetV2.
Reference:
Ma et al. ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design. ECCV 2018.
Public keys:
- ``shufflenet_v2_x0_5``: ShuffleNetV2 x0.5.
- ``shufflenet_v2_x1_0``: ShuffleNetV2 x1.0.
- ``shufflenet_v2_x1_5``: ShuffleNetV2 x1.5.
- ``shufflenet_v2_x2_0``: ShuffleNetV2 x2.0.
"""
def __init__(
self, num_classes, loss, stages_repeats, stages_out_channels, **kwargs
):
super(ShuffleNetV2, self).__init__()
self.loss = loss
if len(stages_repeats) != 3:
raise ValueError(
'expected stages_repeats as list of 3 positive ints'
)
if len(stages_out_channels) != 5:
raise ValueError(
'expected stages_out_channels as list of 5 positive ints'
)
self._stage_out_channels = stages_out_channels
input_channels = 3
output_channels = self._stage_out_channels[0]
self.conv1 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
input_channels = output_channels
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
for name, repeats, output_channels in zip(
stage_names, stages_repeats, self._stage_out_channels[1:]
):
seq = [InvertedResidual(input_channels, output_channels, 2)]
for i in range(repeats - 1):
seq.append(
InvertedResidual(output_channels, output_channels, 1)
)
setattr(self, name, nn.Sequential(*seq))
input_channels = output_channels
output_channels = self._stage_out_channels[-1]
self.conv5 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(output_channels, num_classes)
def featuremaps(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
x = self.conv5(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
if model_url is None:
import warnings
warnings.warn(
'ImageNet pretrained weights are unavailable for this model'
)
return
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def shufflenet_v2_x0_5(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ShuffleNetV2(
num_classes, loss, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['shufflenetv2_x0.5'])
return model
def shufflenet_v2_x1_0(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ShuffleNetV2(
num_classes, loss, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['shufflenetv2_x1.0'])
return model
def shufflenet_v2_x1_5(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ShuffleNetV2(
num_classes, loss, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['shufflenetv2_x1.5'])
return model
def shufflenet_v2_x2_0(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ShuffleNetV2(
num_classes, loss, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['shufflenetv2_x2.0'])
return model
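# Usage sketch (hypothetical): all four builders share stages_repeats=[4, 8, 4] and
# differ only in stage widths, so the embedding size equals the final conv5 width
# (1024 for x0.5/x1.0/x1.5, 2048 for x2.0):
#   >>> shufflenet_v2_x0_5(num_classes=751, pretrained=False).classifier.in_features
#   1024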

View file

@ -0,0 +1,236 @@
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['squeezenet1_0', 'squeezenet1_1', 'squeezenet1_0_fc512']
model_urls = {
'squeezenet1_0':
'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1':
'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
class Fire(nn.Module):
def __init__(
self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes
):
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(
squeeze_planes, expand1x1_planes, kernel_size=1
)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(
squeeze_planes, expand3x3_planes, kernel_size=3, padding=1
)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat(
[
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
], 1
)
class SqueezeNet(nn.Module):
"""SqueezeNet.
Reference:
Iandola et al. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
        and <0.5 MB model size. arXiv:1602.07360.
Public keys:
- ``squeezenet1_0``: SqueezeNet (version=1.0).
- ``squeezenet1_1``: SqueezeNet (version=1.1).
- ``squeezenet1_0_fc512``: SqueezeNet (version=1.0) + FC.
"""
def __init__(
self,
num_classes,
loss,
version=1.0,
fc_dims=None,
dropout_p=None,
**kwargs
):
super(SqueezeNet, self).__init__()
self.loss = loss
self.feature_dim = 512
if version not in [1.0, 1.1]:
raise ValueError(
                'Unsupported SqueezeNet version {version}: '
                '1.0 or 1.1 expected'.format(version=version)
)
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = self._construct_fc_layer(fc_dims, 512, dropout_p)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
f = self.features(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url, map_location=None)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SqueezeNet(
num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_0'])
return model
def squeezenet1_0_fc512(
num_classes, loss='softmax', pretrained=True, **kwargs
):
model = SqueezeNet(
num_classes,
loss,
version=1.0,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_0'])
return model
def squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SqueezeNet(
num_classes, loss, version=1.1, fc_dims=None, dropout_p=None, **kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_1'])
return model
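# Usage sketch (hypothetical): both stem variants end in Fire(512, ...) followed by
# global average pooling, so the test-time embedding is 512-dim unless fc_dims
# overrides it (squeezenet1_0_fc512 keeps 512 via its fc head):
#   >>> model = squeezenet1_1(num_classes=751, pretrained=False)
#   >>> model.eval()(torch.randn(1, 3, 256, 128)).shape
#   torch.Size([1, 512])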

View file

@ -0,0 +1,344 @@
from __future__ import division, absolute_import
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
__all__ = ['xception']
pretrained_settings = {
'xception': {
'imagenet': {
'url':
'http://data.lip6.fr/cadene/pretrainedmodels/xception-43020ad28.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000,
'scale':
            0.8975  # resize validation images to 333 before center-cropping to 299x299
}
}
}
class SeparableConv2d(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
dilation=1,
bias=False
):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(
in_channels,
in_channels,
kernel_size,
stride,
padding,
dilation,
groups=in_channels,
bias=bias
)
self.pointwise = nn.Conv2d(
in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias
)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(
self,
in_filters,
out_filters,
reps,
strides=1,
start_with_relu=True,
grow_first=True
):
super(Block, self).__init__()
if out_filters != in_filters or strides != 1:
self.skip = nn.Conv2d(
in_filters, out_filters, 1, stride=strides, bias=False
)
self.skipbn = nn.BatchNorm2d(out_filters)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = in_filters
if grow_first:
rep.append(self.relu)
rep.append(
SeparableConv2d(
in_filters,
out_filters,
3,
stride=1,
padding=1,
bias=False
)
)
rep.append(nn.BatchNorm2d(out_filters))
filters = out_filters
for i in range(reps - 1):
rep.append(self.relu)
rep.append(
SeparableConv2d(
filters, filters, 3, stride=1, padding=1, bias=False
)
)
rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(
SeparableConv2d(
in_filters,
out_filters,
3,
stride=1,
padding=1,
bias=False
)
)
rep.append(nn.BatchNorm2d(out_filters))
if not start_with_relu:
rep = rep[1:]
else:
rep[0] = nn.ReLU(inplace=False)
if strides != 1:
rep.append(nn.MaxPool2d(3, strides, 1))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x += skip
return x
class Xception(nn.Module):
"""Xception.
Reference:
Chollet. Xception: Deep Learning with Depthwise
Separable Convolutions. CVPR 2017.
Public keys:
- ``xception``: Xception.
"""
def __init__(
self, num_classes, loss, fc_dims=None, dropout_p=None, **kwargs
):
super(Xception, self).__init__()
self.loss = loss
self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.block1 = Block(
64, 128, 2, 2, start_with_relu=False, grow_first=True
)
self.block2 = Block(
128, 256, 2, 2, start_with_relu=True, grow_first=True
)
self.block3 = Block(
256, 728, 2, 2, start_with_relu=True, grow_first=True
)
self.block4 = Block(
728, 728, 3, 1, start_with_relu=True, grow_first=True
)
self.block5 = Block(
728, 728, 3, 1, start_with_relu=True, grow_first=True
)
self.block6 = Block(
728, 728, 3, 1, start_with_relu=True, grow_first=True
)
self.block7 = Block(
728, 728, 3, 1, start_with_relu=True, grow_first=True
)
self.block8 = Block(
728, 728, 3, 1, start_with_relu=True, grow_first=True
)
self.block9 = Block(
728, 728, 3, 1, start_with_relu=True, grow_first=True
)
self.block10 = Block(
728, 728, 3, 1, start_with_relu=True, grow_first=True
)
self.block11 = Block(
728, 728, 3, 1, start_with_relu=True, grow_first=True
)
self.block12 = Block(
728, 1024, 2, 2, start_with_relu=True, grow_first=False
)
self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
self.bn3 = nn.BatchNorm2d(1536)
self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1)
self.bn4 = nn.BatchNorm2d(2048)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.feature_dim = 2048
self.fc = self._construct_fc_layer(fc_dims, 2048, dropout_p)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer.
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = F.relu(x, inplace=True)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x, inplace=True)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
x = F.relu(x, inplace=True)
x = self.conv4(x)
x = self.bn4(x)
x = F.relu(x, inplace=True)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initialize models with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def xception(num_classes, loss='softmax', pretrained=True, **kwargs):
model = Xception(num_classes, loss, fc_dims=None, dropout_p=None, **kwargs)
if pretrained:
model_url = pretrained_settings['xception']['imagenet']['url']
init_pretrained_weights(model, model_url)
return model
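# Usage sketch (hypothetical): the ImageNet weights above expect 299x299 inputs with
# mean/std 0.5 (see pretrained_settings), but the backbone is fully convolutional
# and the adaptive pooling also accepts re-id-sized crops:
#   >>> model = xception(num_classes=751, pretrained=False)
#   >>> model.eval()(torch.randn(1, 3, 256, 128)).shape
#   torch.Size([1, 2048])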

View file

@ -0,0 +1,215 @@
import sys
import time
import warnings
from collections import OrderedDict

import torch
__model_types = [
'resnet50', 'mlfn', 'hacnn', 'mobilenetv2_x1_0', 'mobilenetv2_x1_4',
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25',
'osnet_ibn_x1_0', 'osnet_ain_x1_0']
__trained_urls = {
# market1501 models ########################################################
'resnet50_market1501.pt':
'https://drive.google.com/uc?id=1dUUZ4rHDWohmsQXCRe2C_HbYkzz94iBV',
'resnet50_dukemtmcreid.pt':
'https://drive.google.com/uc?id=17ymnLglnc64NRvGOitY3BqMRS9UWd1wg',
'resnet50_msmt17.pt':
'https://drive.google.com/uc?id=1ep7RypVDOthCRIAqDnn4_N-UhkkFHJsj',
'resnet50_fc512_market1501.pt':
'https://drive.google.com/uc?id=1kv8l5laX_YCdIGVCetjlNdzKIA3NvsSt',
'resnet50_fc512_dukemtmcreid.pt':
'https://drive.google.com/uc?id=13QN8Mp3XH81GK4BPGXobKHKyTGH50Rtx',
'resnet50_fc512_msmt17.pt':
'https://drive.google.com/uc?id=1fDJLcz4O5wxNSUvImIIjoaIF9u1Rwaud',
'mlfn_market1501.pt':
'https://drive.google.com/uc?id=1wXcvhA_b1kpDfrt9s2Pma-MHxtj9pmvS',
'mlfn_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1rExgrTNb0VCIcOnXfMsbwSUW1h2L1Bum',
'mlfn_msmt17.pt':
'https://drive.google.com/uc?id=18JzsZlJb3Wm7irCbZbZ07TN4IFKvR6p-',
'hacnn_market1501.pt':
'https://drive.google.com/uc?id=1LRKIQduThwGxMDQMiVkTScBwR7WidmYF',
'hacnn_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1zNm6tP4ozFUCUQ7Sv1Z98EAJWXJEhtYH',
'hacnn_msmt17.pt':
'https://drive.google.com/uc?id=1MsKRtPM5WJ3_Tk2xC0aGOO7pM3VaFDNZ',
'mobilenetv2_x1_0_market1501.pt':
'https://drive.google.com/uc?id=18DgHC2ZJkjekVoqBWszD8_Xiikz-fewp',
'mobilenetv2_x1_0_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1q1WU2FETRJ3BXcpVtfJUuqq4z3psetds',
'mobilenetv2_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=1j50Hv14NOUAg7ZeB3frzfX-WYLi7SrhZ',
'mobilenetv2_x1_4_market1501.pt':
'https://drive.google.com/uc?id=1t6JCqphJG-fwwPVkRLmGGyEBhGOf2GO5',
'mobilenetv2_x1_4_dukemtmcreid.pt':
'https://drive.google.com/uc?id=12uD5FeVqLg9-AFDju2L7SQxjmPb4zpBN',
'mobilenetv2_x1_4_msmt17.pt':
'https://drive.google.com/uc?id=1ZY5P2Zgm-3RbDpbXM0kIBMPvspeNIbXz',
'osnet_x1_0_market1501.pt':
'https://drive.google.com/uc?id=1vduhq5DpN2q1g4fYEZfPI17MJeh9qyrA',
'osnet_x1_0_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1QZO_4sNf4hdOKKKzKc-TZU9WW1v6zQbq',
'osnet_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=112EMUfBPYeYg70w-syK6V6Mx8-Qb9Q1M',
'osnet_x0_75_market1501.pt':
'https://drive.google.com/uc?id=1ozRaDSQw_EQ8_93OUmjDbvLXw9TnfPer',
'osnet_x0_75_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1IE3KRaTPp4OUa6PGTFL_d5_KQSJbP0Or',
'osnet_x0_75_msmt17.pt':
'https://drive.google.com/uc?id=1QEGO6WnJ-BmUzVPd3q9NoaO_GsPNlmWc',
'osnet_x0_5_market1501.pt':
'https://drive.google.com/uc?id=1PLB9rgqrUM7blWrg4QlprCuPT7ILYGKT',
'osnet_x0_5_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1KoUVqmiST175hnkALg9XuTi1oYpqcyTu',
'osnet_x0_5_msmt17.pt':
'https://drive.google.com/uc?id=1UT3AxIaDvS2PdxzZmbkLmjtiqq7AIKCv',
'osnet_x0_25_market1501.pt':
'https://drive.google.com/uc?id=1z1UghYvOTtjx7kEoRfmqSMu-z62J6MAj',
'osnet_x0_25_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1eumrtiXT4NOspjyEV4j8cHmlOaaCGk5l',
'osnet_x0_25_msmt17.pt':
'https://drive.google.com/uc?id=1sSwXSUlj4_tHZequ_iZ8w_Jh0VaRQMqF',
    ####### msmt17 models ######################################################
'resnet50_msmt17.pt':
'https://drive.google.com/uc?id=1yiBteqgIZoOeywE8AhGmEQl7FTVwrQmf',
'osnet_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=1IosIFlLiulGIjwW3H8uMRmx3MzPwf86x',
'osnet_x0_75_msmt17.pt':
'https://drive.google.com/uc?id=1fhjSS_7SUGCioIf2SWXaRGPqIY9j7-uw',
'osnet_x0_5_msmt17.pt':
'https://drive.google.com/uc?id=1DHgmb6XV4fwG3n-CnCM0zdL9nMsZ9_RF',
'osnet_x0_25_msmt17.pt':
'https://drive.google.com/uc?id=1Kkx2zW89jq_NETu4u42CFZTMVD5Hwm6e',
'osnet_ibn_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=1q3Sj2ii34NlfxA4LvmHdWO_75NDRmECJ',
'osnet_ain_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=1SigwBE6mPdqiJMqhuIY4aqC7--5CsMal',
}
def show_downloadeable_models():
print('\nAvailable .pt ReID models for automatic download')
print(list(__trained_urls.keys()))
def get_model_url(model):
    if model.name in __trained_urls:
        return __trained_urls[model.name]
    else:
        return None
def is_model_in_model_types(model):
    return model.name in __model_types
def get_model_name(model):
for x in __model_types:
if x in model.name:
return x
return None
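# Helper sketch (hypothetical path): these lookups key off the file name of the
# weights path, e.g. the `.name` attribute of a pathlib.Path:
#   >>> from pathlib import Path
#   >>> w = Path('/weights/osnet_x0_25_msmt17.pt')
#   >>> get_model_name(w)       # first __model_types entry contained in the name
#   'osnet_x0_25'
#   >>> get_model_url(w) is not None
#   True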
def download_url(url, dst):
"""Downloads file from a url to a destination.
Args:
url (str): url to download file.
dst (str): destination path.
"""
from six.moves import urllib
print('* url="{}"'.format(url))
print('* destination="{}"'.format(dst))
def _reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024*duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write(
'\r...%d%%, %d MB, %d KB/s, %d seconds passed' %
(percent, progress_size / (1024*1024), speed, duration)
)
sys.stdout.flush()
urllib.request.urlretrieve(url, dst, _reporthook)
sys.stdout.write('\n')
def load_pretrained_weights(model, weight_path):
r"""Loads pretrianed weights to model.
Features::
- Incompatible layers (unmatched in name or size) will be ignored.
- Can automatically deal with keys containing "module.".
Args:
model (nn.Module): network model.
weight_path (str): path to pretrained weights.
Examples::
>>> from torchreid.utils import load_pretrained_weights
>>> weight_path = 'log/my_model/model-best.pth.tar'
>>> load_pretrained_weights(model, weight_path)
"""
checkpoint = torch.load(weight_path)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(weight_path)
)
else:
print(
'Successfully loaded pretrained weights from "{}"'.
format(weight_path)
)
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)

View file

@ -0,0 +1,237 @@
import torch.nn as nn
import torch
from pathlib import Path
import numpy as np
from itertools import islice
import torchvision.transforms as transforms
import cv2
import sys
import torchvision.transforms as T
from collections import OrderedDict, namedtuple
import gdown
from os.path import exists as file_exists
from ultralytics.yolo.utils.checks import check_requirements, check_version
from ultralytics.yolo.utils import LOGGER
from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name,
download_url, load_pretrained_weights)
from trackers.strongsort.deep.models import build_model
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
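# Usage sketch (hypothetical): accepts a single path or a list of paths and asserts
# on any unacceptable extension:
#   >>> check_suffix('osnet_x0_25_msmt17.pt', suffix=('.pt',))  # passes silently
#   >>> check_suffix('model.onnx', suffix=('.pt',))             # raises AssertionError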
class ReIDDetectMultiBackend(nn.Module):
# ReID models MultiBackend class for python inference on various backends
def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False):
super().__init__()
w = weights[0] if isinstance(weights, list) else weights
self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w) # get backend
self.fp16 = fp16
self.fp16 &= self.pt or self.jit or self.engine # FP16
# Build transform functions
self.device = device
self.image_size=(256, 128)
self.pixel_mean=[0.485, 0.456, 0.406]
self.pixel_std=[0.229, 0.224, 0.225]
self.transforms = []
self.transforms += [T.Resize(self.image_size)]
self.transforms += [T.ToTensor()]
self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)]
self.preprocess = T.Compose(self.transforms)
self.to_pil = T.ToPILImage()
model_name = get_model_name(w)
if w.suffix == '.pt':
model_url = get_model_url(w)
if not file_exists(w) and model_url is not None:
gdown.download(model_url, str(w), quiet=False)
elif file_exists(w):
pass
else:
                print(f'No URL associated with the chosen StrongSORT weights ({w}). Choose one of:')
show_downloadeable_models()
exit()
# Build model
self.model = build_model(
model_name,
num_classes=1,
pretrained=not (w and w.is_file()),
use_gpu=device
)
if self.pt: # PyTorch
# populate model arch with weights
if w and w.is_file() and w.suffix == '.pt':
load_pretrained_weights(self.model, w)
self.model.to(device).eval()
self.model.half() if self.fp16 else self.model.float()
elif self.jit:
LOGGER.info(f'Loading {w} for TorchScript inference...')
self.model = torch.jit.load(w)
self.model.half() if self.fp16 else self.model.float()
elif self.onnx: # ONNX Runtime
LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
cuda = torch.cuda.is_available() and device.type != 'cpu'
#check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
import onnxruntime
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
self.session = onnxruntime.InferenceSession(str(w), providers=providers)
elif self.engine: # TensorRT
LOGGER.info(f'Loading {w} for TensorRT inference...')
import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0
if device.type == 'cpu':
device = torch.device('cuda:0')
Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
logger = trt.Logger(trt.Logger.INFO)
with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
self.model_ = runtime.deserialize_cuda_engine(f.read())
self.context = self.model_.create_execution_context()
self.bindings = OrderedDict()
self.fp16 = False # default updated below
self.dynamic = False
for index in range(self.model_.num_bindings):
name = self.model_.get_binding_name(index)
dtype = trt.nptype(self.model_.get_binding_dtype(index))
if self.model_.binding_is_input(index):
if -1 in tuple(self.model_.get_binding_shape(index)): # dynamic
self.dynamic = True
self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2]))
if dtype == np.float16:
self.fp16 = True
shape = tuple(self.context.get_binding_shape(index))
im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items())
batch_size = self.bindings['images'].shape[0] # if dynamic, this is instead max batch size
elif self.xml: # OpenVINO
LOGGER.info(f'Loading {w} for OpenVINO inference...')
check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
from openvino.runtime import Core, Layout, get_batch
ie = Core()
if not Path(w).is_file(): # if not *.xml
w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
if network.get_parameters()[0].get_layout().empty:
network.get_parameters()[0].set_layout(Layout("NCWH"))
batch_dim = get_batch(network)
if batch_dim.is_static:
batch_size = batch_dim.get_length()
self.executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2
self.output_layer = next(iter(self.executable_network.outputs))
elif self.tflite:
LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
from tflite_runtime.interpreter import Interpreter, load_delegate
except ImportError:
import tensorflow as tf
Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
self.interpreter = Interpreter(model_path=str(w))  # use the resolved Interpreter class for both backends
self.interpreter.allocate_tensors()
# Get input and output tensors.
self.input_details = self.interpreter.get_input_details()
self.output_details = self.interpreter.get_output_details()
# Test model on random input data.
input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32)
self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
self.interpreter.invoke()
# The function `get_tensor()` returns a copy of the tensor data.
output_data = self.interpreter.get_tensor(self.output_details[0]['index'])
else:
print('This model framework is not supported yet!')
exit()
@staticmethod
def model_type(p='path/to/model.pt'):
# Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
from trackers.reid_export import export_formats
sf = list(export_formats().Suffix) # export suffixes
check_suffix(p, sf) # checks
types = [s in Path(p).name for s in sf]
return types
def _preprocess(self, im_batch):
images = []
for element in im_batch:
image = self.to_pil(element)
image = self.preprocess(image)
images.append(image)
images = torch.stack(images, dim=0)
images = images.to(self.device)
return images
def forward(self, im_batch):
# preprocess batch
im_batch = self._preprocess(im_batch)
# batch to half
if self.fp16 and im_batch.dtype != torch.float16:
im_batch = im_batch.half()
# batch processing
features = []
if self.pt:
features = self.model(im_batch)
elif self.jit: # TorchScript
features = self.model(im_batch)
elif self.onnx: # ONNX Runtime
im_batch = im_batch.cpu().numpy() # torch to numpy
features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0]
elif self.engine: # TensorRT
if self.dynamic and im_batch.shape != self.bindings['images'].shape:
i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output'))
self.context.set_binding_shape(i_in, im_batch.shape) # reshape if dynamic
self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape)
self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out)))
s = self.bindings['images'].shape
assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
self.binding_addrs['images'] = int(im_batch.data_ptr())
self.context.execute_v2(list(self.binding_addrs.values()))
features = self.bindings['output'].data
elif self.xml: # OpenVINO
im_batch = im_batch.cpu().numpy() # FP32
features = self.executable_network([im_batch])[self.output_layer]
else:
print('Framework not supported at the moment, we are working on it...')
exit()
if isinstance(features, (list, tuple)):
return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features]
else:
return self.from_numpy(features)
def from_numpy(self, x):
return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz=[(256, 128, 3)]):
# Warmup model by running inference once
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.tflite
if any(warmup_types) and self.device.type != 'cpu':
im = [np.empty(*imgsz).astype(np.uint8)] # input
for _ in range(2 if self.jit else 1): #
self.forward(im) # warmup
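A hedged usage sketch of the backend: embed two person crops with the PyTorch path. The weight file name is the class default and must exist locally (or have a known download URL); passing it as a Path is required because the constructor inspects w.suffix and w.is_file():

import numpy as np
import torch
from pathlib import Path

reid = ReIDDetectMultiBackend(weights=Path('osnet_x0_25_msmt17.pt'),
                              device=torch.device('cpu'), fp16=False)
crops = [np.zeros((128, 64, 3), dtype=np.uint8)] * 2  # HxWxC uint8 person crops
feats = reid(crops)  # forward(): resize + normalize, then a (2, embedding_dim) feature tensor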

Binary files not shown (five demo images added: 8.2, 9.6, 8.3, 7.9 and 7.5 MiB).

View file

@ -0,0 +1,58 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
class Detection(object):
"""
This class represents a bounding box detection in a single image.
Parameters
----------
tlwh : array_like
Bounding box in format `(x, y, w, h)`.
confidence : float
Detector confidence score.
feature : array_like
A feature vector that describes the object contained in this image.
Attributes
----------
tlwh : ndarray
Bounding box in format `(top left x, top left y, width, height)`.
confidence : ndarray
Detector confidence score.
feature : ndarray | NoneType
A feature vector that describes the object contained in this image.
"""
def __init__(self, tlwh, confidence, feature):
self.tlwh = np.asarray(tlwh, dtype=np.float32)
self.confidence = float(confidence)
self.feature = np.asarray(feature.cpu(), dtype=np.float32)
def to_tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
def to_xyah(self):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def to_xyah_ext(bbox):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = bbox.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
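A quick check of the two box conversions; the feature is passed as a torch tensor because the constructor calls .cpu() on it:

import numpy as np
import torch

det = Detection(tlwh=[10., 20., 50., 100.], confidence=0.9, feature=torch.ones(128))
print(det.to_tlbr())  # [ 10.  20.  60. 120.]
print(det.to_xyah())  # [ 35.  70.   0.5 100.]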

View file

@ -0,0 +1,82 @@
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import linear_assignment
def iou(bbox, candidates):
"""Computer intersection over union.
Parameters
----------
bbox : ndarray
A bounding box in format `(top left x, top left y, width, height)`.
candidates : ndarray
A matrix of candidate bounding boxes (one per row) in the same format
as `bbox`.
Returns
-------
ndarray
The intersection over union in [0, 1] between the `bbox` and each
candidate. A higher score means a larger fraction of the `bbox` is
occluded by the candidate.
"""
bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
candidates_tl = candidates[:, :2]
candidates_br = candidates[:, :2] + candidates[:, 2:]
tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
wh = np.maximum(0., br - tl)
area_intersection = wh.prod(axis=1)
area_bbox = bbox[2:].prod()
area_candidates = candidates[:, 2:].prod(axis=1)
return area_intersection / (area_bbox + area_candidates - area_intersection)
def iou_cost(tracks, detections, track_indices=None,
detection_indices=None):
"""An intersection over union distance metric.
Parameters
----------
tracks : List[deep_sort.track.Track]
A list of tracks.
detections : List[deep_sort.detection.Detection]
A list of detections.
track_indices : Optional[List[int]]
A list of indices to tracks that should be matched. Defaults to
all `tracks`.
detection_indices : Optional[List[int]]
A list of indices to detections that should be matched. Defaults
to all `detections`.
Returns
-------
ndarray
Returns a cost matrix of shape
len(track_indices), len(detection_indices) where entry (i, j) is
`1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
for row, track_idx in enumerate(track_indices):
if tracks[track_idx].time_since_update > 1:
cost_matrix[row, :] = linear_assignment.INFTY_COST
continue
bbox = tracks[track_idx].to_tlwh()
candidates = np.asarray(
[detections[i].tlwh for i in detection_indices])
cost_matrix[row, :] = 1. - iou(bbox, candidates)
return cost_matrix
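A worked example of the iou helper above: one reference box against an identical candidate and a partially overlapping one:

import numpy as np

bbox = np.array([0., 0., 10., 10.])            # (top left x, top left y, w, h)
candidates = np.array([[0., 0., 10., 10.],
                       [5., 5., 10., 10.]])
print(iou(bbox, candidates))                    # [1.0, 0.142857] = [100/100, 25/175]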

View file

@ -0,0 +1,214 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg
"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
1: 3.8415,
2: 5.9915,
3: 7.8147,
4: 9.4877,
5: 11.070,
6: 12.592,
7: 14.067,
8: 15.507,
9: 16.919}
class KalmanFilter(object):
"""
A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space
x, y, a, h, vx, vy, va, vh
contains the bounding box center position (x, y), aspect ratio a, height h,
and their respective velocities.
Object motion follows a constant velocity model. The bounding box location
(x, y, a, h) is taken as direct observation of the state space (linear
observation model).
"""
def __init__(self):
ndim, dt = 4, 1.
# Create Kalman filter model matrices.
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
# Motion and observation uncertainty are chosen relative to the current
# state estimate. These weights control the amount of uncertainty in
# the model. This is a bit hacky.
self._std_weight_position = 1. / 20
self._std_weight_velocity = 1. / 160
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Bounding box coordinates (x, y, a, h) with center position (x, y),
aspect ratio a, and height h.
Returns
-------
(ndarray, ndarray)
Returns the mean vector (8 dimensional) and covariance matrix (8x8
dimensional) of the new track. Unobserved velocities are initialized
to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[0], # the center point x
2 * self._std_weight_position * measurement[1], # the center point y
1 * measurement[2], # the ratio of width/height
2 * self._std_weight_position * measurement[3], # the height
10 * self._std_weight_velocity * measurement[0],
10 * self._std_weight_velocity * measurement[1],
0.1 * measurement[2],
10 * self._std_weight_velocity * measurement[3]]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The 8 dimensional mean vector of the object state at the previous
time step.
covariance : ndarray
The 8x8 dimensional covariance matrix of the object state at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[0],
self._std_weight_position * mean[1],
1 * mean[2],
self._std_weight_position * mean[3]]
std_vel = [
self._std_weight_velocity * mean[0],
self._std_weight_velocity * mean[1],
0.1 * mean[2],
self._std_weight_velocity * mean[3]]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
mean = np.dot(self._motion_mat, mean)
covariance = np.linalg.multi_dot((
self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance, confidence=.0):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector (8 dimensional array).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
confidence : float
The detection confidence score, used to scale the measurement noise.
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
std = [
self._std_weight_position * mean[3],
self._std_weight_position * mean[3],
1e-1,
self._std_weight_position * mean[3]]
std = [(1 - confidence) * x for x in std]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((
self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def update(self, mean, covariance, measurement, confidence=.0):
"""Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, a, h), where (x, y)
is the center position, a the aspect ratio, and h the height of the
bounding box.
confidence : float
The detection confidence score.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
projected_mean, projected_cov = self.project(mean, covariance, confidence)
chol_factor, lower = scipy.linalg.cho_factor(
projected_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
check_finite=False).T
innovation = measurement - projected_mean
new_mean = mean + np.dot(innovation, kalman_gain.T)
new_covariance = covariance - np.linalg.multi_dot((
kalman_gain, projected_cov, kalman_gain.T))
return new_mean, new_covariance
def gating_distance(self, mean, covariance, measurements,
only_position=False):
"""Compute gating distance between state distribution and measurements.
A suitable distance threshold can be obtained from `chi2inv95`. If
`only_position` is False, the chi-square distribution has 4 degrees of
freedom, otherwise 2.
Parameters
----------
mean : ndarray
Mean vector over the state distribution (8 dimensional).
covariance : ndarray
Covariance of the state distribution (8x8 dimensional).
measurements : ndarray
An Nx4 dimensional matrix of N measurements, each in
format (x, y, a, h) where (x, y) is the bounding box center
position, a the aspect ratio, and h the height.
only_position : Optional[bool]
If True, distance computation is done with respect to the bounding
box center position only.
Returns
-------
ndarray
Returns an array of length N, where the i-th element contains the
squared Mahalanobis distance between (mean, covariance) and
`measurements[i]`.
"""
mean, covariance = self.project(mean, covariance)
if only_position:
mean, covariance = mean[:2], covariance[:2, :2]
measurements = measurements[:, :2]
cholesky_factor = np.linalg.cholesky(covariance)
d = measurements - mean
z = scipy.linalg.solve_triangular(
cholesky_factor, d.T, lower=True, check_finite=False,
overwrite_b=True)
squared_maha = np.sum(z * z, axis=0)
return squared_maha
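One predict/update cycle on a synthetic (x, y, a, h) measurement. Note that project() scales the measurement noise by (1 - confidence), so confident detections are trusted more:

import numpy as np

kf = KalmanFilter()
mean, cov = kf.initiate(np.array([320., 240., 0.5, 100.]))   # new track
mean, cov = kf.predict(mean, cov)                             # constant-velocity step
mean, cov = kf.update(mean, cov, np.array([322., 241., 0.5, 101.]),
                      confidence=0.9)                         # high conf -> low noise
print(mean[:4])  # corrected (x, y, a, h)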

View file

@ -0,0 +1,174 @@
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from scipy.optimize import linear_sum_assignment
from . import kalman_filter
INFTY_COST = 1e+5
def min_cost_matching(
distance_metric, max_distance, tracks, detections, track_indices=None,
detection_indices=None):
"""Solve linear assignment problem.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection_indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
if len(detection_indices) == 0 or len(track_indices) == 0:
return [], track_indices, detection_indices # Nothing to match.
cost_matrix = distance_metric(
tracks, detections, track_indices, detection_indices)
cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
row_indices, col_indices = linear_sum_assignment(cost_matrix)
matches, unmatched_tracks, unmatched_detections = [], [], []
for col, detection_idx in enumerate(detection_indices):
if col not in col_indices:
unmatched_detections.append(detection_idx)
for row, track_idx in enumerate(track_indices):
if row not in row_indices:
unmatched_tracks.append(track_idx)
for row, col in zip(row_indices, col_indices):
track_idx = track_indices[row]
detection_idx = detection_indices[col]
if cost_matrix[row, col] > max_distance:
unmatched_tracks.append(track_idx)
unmatched_detections.append(detection_idx)
else:
matches.append((track_idx, detection_idx))
return matches, unmatched_tracks, unmatched_detections
def matching_cascade(
distance_metric, max_distance, cascade_depth, tracks, detections,
track_indices=None, detection_indices=None):
"""Run matching cascade.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
cascade_depth: int
The cascade depth; should be set to the maximum track age.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : Optional[List[int]]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above). Defaults to all tracks.
detection_indices : Optional[List[int]]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above). Defaults to all
detections.
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = list(range(len(tracks)))
if detection_indices is None:
detection_indices = list(range(len(detections)))
unmatched_detections = detection_indices
matches = []
# The original DeepSORT cascade matched tracks level-by-level on
# time_since_update; here all tracks are matched in a single pass.
track_indices_l = list(track_indices)
matches_l, _, unmatched_detections = \
min_cost_matching(
distance_metric, max_distance, tracks, detections,
track_indices_l, unmatched_detections)
matches += matches_l
unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
return matches, unmatched_tracks, unmatched_detections
def gate_cost_matrix(
cost_matrix, tracks, detections, track_indices, detection_indices, mc_lambda,
gated_cost=INFTY_COST, only_position=False):
"""Invalidate infeasible entries in cost matrix based on the state
distributions obtained by Kalman filtering.
Parameters
----------
cost_matrix : ndarray
The NxM dimensional cost matrix, where N is the number of track indices
and M is the number of detection indices, such that entry (i, j) is the
association cost between `tracks[track_indices[i]]` and
`detections[detection_indices[j]]`.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
mc_lambda : float
Weighting between the appearance cost and the Mahalanobis gating
distance when the two are blended into the returned cost matrix.
gated_cost : Optional[float]
Entries in the cost matrix corresponding to infeasible associations are
set to this value. Defaults to a very large value.
only_position : Optional[bool]
If True, only the x, y position of the state distribution is considered
during gating. Defaults to False.
Returns
-------
ndarray
Returns the modified cost matrix.
"""
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray(
[detections[i].to_xyah() for i in detection_indices])
for row, track_idx in enumerate(track_indices):
track = tracks[track_idx]
gating_distance = track.kf.gating_distance(track.mean, track.covariance, measurements, only_position)
cost_matrix[row, gating_distance > gating_threshold] = gated_cost
cost_matrix[row] = mc_lambda * cost_matrix[row] + (1 - mc_lambda) * gating_distance
return cost_matrix
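A toy run of min_cost_matching with a stubbed metric (the stub and the placeholder lists are hypothetical; only their lengths matter here), to make the gating behaviour concrete:

import numpy as np

def stub_metric(tracks, dets, track_idx, det_idx):
    # pretend appearance costs: track 0 pairs with detection 1, track 1 with 0
    return np.array([[0.9, 0.1],
                     [0.2, 0.8]])

matches, unmatched_trk, unmatched_det = min_cost_matching(
    stub_metric, max_distance=0.5, tracks=[None, None], detections=[None, None])
print(matches)  # [(0, 1), (1, 0)]; any pair costing more than max_distance is rejected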

View file

@ -0,0 +1,162 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
def _pdist(a, b):
"""Compute pair-wise squared distance between points in `a` and `b`.
Parameters
----------
a : array_like
An NxM matrix of N samples of dimensionality M.
b : array_like
An LxM matrix of L samples of dimensionality M.
Returns
-------
ndarray
Returns a matrix of size len(a), len(b) such that element (i, j)
contains the squared distance between `a[i]` and `b[j]`.
"""
a, b = np.asarray(a), np.asarray(b)
if len(a) == 0 or len(b) == 0:
return np.zeros((len(a), len(b)))
a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
r2 = np.clip(r2, 0., float(np.inf))
return r2
def _cosine_distance(a, b, data_is_normalized=False):
"""Compute pair-wise cosine distance between points in `a` and `b`.
Parameters
----------
a : array_like
An NxM matrix of N samples of dimensionality M.
b : array_like
An LxM matrix of L samples of dimensionality M.
data_is_normalized : Optional[bool]
If True, assumes rows in a and b are unit length vectors.
Otherwise, a and b are explicitly normalized to length 1.
Returns
-------
ndarray
Returns a matrix of size len(a), len(b) such that element (i, j)
contains the cosine distance between `a[i]` and `b[j]`.
"""
if not data_is_normalized:
a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
return 1. - np.dot(a, b.T)
def _nn_euclidean_distance(x, y):
""" Helper function for nearest neighbor distance metric (Euclidean).
Parameters
----------
x : ndarray
A matrix of N row-vectors (sample points).
y : ndarray
A matrix of M row-vectors (query points).
Returns
-------
ndarray
A vector of length M that contains for each entry in `y` the
smallest Euclidean distance to a sample in `x`.
"""
distances = _pdist(x, y)
return np.maximum(0.0, distances.min(axis=0))
def _nn_cosine_distance(x, y):
""" Helper function for nearest neighbor distance metric (cosine).
Parameters
----------
x : ndarray
A matrix of N row-vectors (sample points).
y : ndarray
A matrix of M row-vectors (query points).
Returns
-------
ndarray
A vector of length M that contains for each entry in `y` the
smallest cosine distance to a sample in `x`.
"""
distances = _cosine_distance(x, y)
return distances.min(axis=0)
class NearestNeighborDistanceMetric(object):
"""
A nearest neighbor distance metric that, for each target, returns
the closest distance to any sample that has been observed so far.
Parameters
----------
metric : str
Either "euclidean" or "cosine".
matching_threshold: float
The matching threshold. Samples with larger distance are considered an
invalid match.
budget : Optional[int]
If not None, fix samples per class to at most this number. Removes
the oldest samples when the budget is reached.
Attributes
----------
samples : Dict[int -> List[ndarray]]
A dictionary that maps from target identities to the list of samples
that have been observed so far.
"""
def __init__(self, metric, matching_threshold, budget=None):
if metric == "euclidean":
self._metric = _nn_euclidean_distance
elif metric == "cosine":
self._metric = _nn_cosine_distance
else:
raise ValueError(
"Invalid metric; must be either 'euclidean' or 'cosine'")
self.matching_threshold = matching_threshold
self.budget = budget
self.samples = {}
def partial_fit(self, features, targets, active_targets):
"""Update the distance metric with new data.
Parameters
----------
features : ndarray
An NxM matrix of N features of dimensionality M.
targets : ndarray
An integer array of associated target identities.
active_targets : List[int]
A list of targets that are currently present in the scene.
"""
for feature, target in zip(features, targets):
self.samples.setdefault(target, []).append(feature)
if self.budget is not None:
self.samples[target] = self.samples[target][-self.budget:]
self.samples = {k: self.samples[k] for k in active_targets}
def distance(self, features, targets):
"""Compute distance between features and targets.
Parameters
----------
features : ndarray
An NxM matrix of N features of dimensionality M.
targets : List[int]
A list of targets to match the given `features` against.
Returns
-------
ndarray
Returns a cost matrix of shape len(targets), len(features), where
element (i, j) contains the closest squared distance between
`targets[i]` and `features[j]`.
"""
cost_matrix = np.zeros((len(targets), len(features)))
for i, target in enumerate(targets):
cost_matrix[i, :] = self._metric(self.samples[target], features)
return cost_matrix
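A short sketch of the gallery/query flow: partial_fit stores per-identity features, distance then scores queries against each stored gallery (unit vectors here, so cosine distances come out as 0 or 1):

import numpy as np

metric = NearestNeighborDistanceMetric('cosine', matching_threshold=0.2, budget=100)
gallery = np.eye(4, dtype=np.float32)[:2]          # one unit feature per identity
metric.partial_fit(gallery, targets=np.array([1, 2]), active_targets=[1, 2])
queries = np.eye(4, dtype=np.float32)[:2]
print(metric.distance(queries, targets=[1, 2]))    # ~0 on the diagonal, 1 elsewhere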

View file

@ -0,0 +1,73 @@
# vim: expandtab:ts=4:sw=4
import numpy as np
import cv2
def non_max_suppression(boxes, max_bbox_overlap, scores=None):
"""Suppress overlapping detections.
Original code from [1]_ has been adapted to include confidence score.
.. [1] http://www.pyimagesearch.com/2015/02/16/
faster-non-maximum-suppression-python/
Examples
--------
>>> boxes = [d.roi for d in detections]
>>> scores = [d.confidence for d in detections]
>>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
>>> detections = [detections[i] for i in indices]
Parameters
----------
boxes : ndarray
Array of ROIs (x, y, width, height).
max_bbox_overlap : float
ROIs that overlap more than this values are suppressed.
scores : Optional[array_like]
Detector confidence score.
Returns
-------
List[int]
Returns indices of detections that have survived non-maxima suppression.
"""
if len(boxes) == 0:
return []
boxes = boxes.astype(float)  # np.float was removed in NumPy >= 1.24
pick = []
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2] + boxes[:, 0]
y2 = boxes[:, 3] + boxes[:, 1]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
if scores is not None:
idxs = np.argsort(scores)
else:
idxs = np.argsort(y2)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(
idxs, np.concatenate(
([last], np.where(overlap > max_bbox_overlap)[0])))
return pick
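A small worked example: two heavily overlapping boxes and one distant box, so NMS keeps the higher-scoring duplicate plus the distant box:

import numpy as np

boxes = np.array([[0., 0., 10., 10.],     # (x, y, w, h)
                  [1., 1., 10., 10.],     # ~83% overlap with box 0
                  [50., 50., 10., 10.]])
scores = [0.9, 0.8, 0.7]
print(non_max_suppression(boxes, max_bbox_overlap=0.5, scores=scores))  # [0, 2]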

View file

@ -0,0 +1,317 @@
# vim: expandtab:ts=4:sw=4
import cv2
import numpy as np
from trackers.strongsort.sort.kalman_filter import KalmanFilter
from collections import deque
class TrackState:
"""
Enumeration type for the single target track state. Newly created tracks are
classified as `tentative` until enough evidence has been collected. Then,
the track state is changed to `confirmed`. Tracks that are no longer alive
are classified as `deleted` to mark them for removal from the set of active
tracks.
"""
Tentative = 1
Confirmed = 2
Deleted = 3
class Track:
"""
A single target track with state space `(x, y, a, h)` and associated
velocities, where `(x, y)` is the center of the bounding box, `a` is the
aspect ratio and `h` is the height.
Parameters
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
track_id : int
A unique track identifier.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
max_age : int
The maximum number of consecutive misses before the track state is
set to `Deleted`.
feature : Optional[ndarray]
Feature vector of the detection this track originates from. If not None,
this feature is added to the `features` cache.
Attributes
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
track_id : int
A unique track identifier.
hits : int
Total number of measurement updates.
age : int
Total number of frames since first occurrence.
time_since_update : int
Total number of frames since last measurement update.
state : TrackState
The current track state.
features : List[ndarray]
A cache of features. On each measurement update, the associated feature
vector is added to this list.
"""
def __init__(self, detection, track_id, class_id, conf, n_init, max_age, ema_alpha,
feature=None):
self.track_id = track_id
self.class_id = int(class_id)
self.hits = 1
self.age = 1
self.time_since_update = 0
self.max_num_updates_wo_assignment = 7
self.updates_wo_assignment = 0
self.ema_alpha = ema_alpha
self.state = TrackState.Tentative
self.features = []
if feature is not None:
feature /= np.linalg.norm(feature)
self.features.append(feature)
self.conf = conf
self._n_init = n_init
self._max_age = max_age
self.kf = KalmanFilter()
self.mean, self.covariance = self.kf.initiate(detection)
# Initializing trajectory queue
self.q = deque(maxlen=25)
def to_tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
Returns
-------
ndarray
The bounding box.
"""
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
def to_tlbr(self):
"""Get kf estimated current position in bounding box format `(min x, miny, max x,
max y)`.
Returns
-------
ndarray
The predicted kf bounding box.
"""
ret = self.to_tlwh()
ret[2:] = ret[:2] + ret[2:]
return ret
def ECC(self, src, dst, warp_mode=cv2.MOTION_EUCLIDEAN, eps=1e-5,
max_iter=100, scale=0.1, align=False):
"""Compute the warp matrix from src to dst.
Parameters
----------
src : ndarray
An NxM matrix of the source image (BGR or gray); it must have the same format as dst.
dst : ndarray
An NxM matrix of the target image (BGR or gray).
warp_mode: flags of opencv
translation: cv2.MOTION_TRANSLATION
rotated and shifted: cv2.MOTION_EUCLIDEAN
affine(shift,rotated,shear): cv2.MOTION_AFFINE
homography(3d): cv2.MOTION_HOMOGRAPHY
eps: float
the threshold of the increment in the correlation coefficient between two iterations
max_iter: int
the number of iterations.
scale: float or [int, int]
scale_ratio: float
scale_size: [W, H]
align: bool
whether to warp affine or perspective transforms to the source image
Returns
-------
warp matrix : ndarray
Returns the warp matrix from src to dst.
if the motion model is homography, the warp matrix will be 3x3, otherwise 2x3
src_aligned: ndarray
the aligned source image, in grayscale
"""
# BGR2GRAY
if src.ndim == 3:
# Convert images to grayscale
src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
# make the imgs smaller to speed up
if scale is not None:
if isinstance(scale, float) or isinstance(scale, int):
if scale != 1:
src_r = cv2.resize(src, (0, 0), fx = scale, fy = scale,interpolation = cv2.INTER_LINEAR)
dst_r = cv2.resize(dst, (0, 0), fx = scale, fy = scale,interpolation = cv2.INTER_LINEAR)
scale = [scale, scale]
else:
src_r, dst_r = src, dst
scale = None
else:
if scale[0] != src.shape[1] and scale[1] != src.shape[0]:
src_r = cv2.resize(src, (scale[0], scale[1]), interpolation = cv2.INTER_LINEAR)
dst_r = cv2.resize(dst, (scale[0], scale[1]), interpolation=cv2.INTER_LINEAR)
scale = [scale[0] / src.shape[1], scale[1] / src.shape[0]]
else:
src_r, dst_r = src, dst
scale = None
else:
src_r, dst_r = src, dst
# Define 2x3 or 3x3 matrices and initialize the matrix to identity
if warp_mode == cv2.MOTION_HOMOGRAPHY :
warp_matrix = np.eye(3, 3, dtype=np.float32)
else :
warp_matrix = np.eye(2, 3, dtype=np.float32)
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
try:
(cc, warp_matrix) = cv2.findTransformECC(src_r, dst_r, warp_matrix, warp_mode, criteria, None, 1)
except cv2.error as e:
print(f'ECC transform failed: {e}')
return None, None
if scale is not None:
warp_matrix[0, 2] = warp_matrix[0, 2] / scale[0]
warp_matrix[1, 2] = warp_matrix[1, 2] / scale[1]
if align:
sz = src.shape
if warp_mode == cv2.MOTION_HOMOGRAPHY:
# Use warpPerspective for Homography
src_aligned = cv2.warpPerspective(src, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR)
else :
# Use warpAffine for Translation, Euclidean and Affine
src_aligned = cv2.warpAffine(src, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR)
return warp_matrix, src_aligned
else:
return warp_matrix, None
def get_matrix(self, matrix):
eye = np.eye(3)
dist = np.linalg.norm(eye - matrix)
if dist < 100:
return matrix
else:
return eye
def camera_update(self, previous_frame, next_frame):
warp_matrix, src_aligned = self.ECC(previous_frame, next_frame)
if warp_matrix is None and src_aligned is None:
return
a, b = warp_matrix
warp_matrix = np.array([a, b, [0, 0, 1]])
matrix = self.get_matrix(warp_matrix)
x1, y1, x2, y2 = self.to_tlbr()
x1_, y1_, _ = matrix @ np.array([x1, y1, 1]).T
x2_, y2_, _ = matrix @ np.array([x2, y2, 1]).T
w, h = x2_ - x1_, y2_ - y1_
cx, cy = x1_ + w / 2, y1_ + h / 2
self.mean[:4] = [cx, cy, w / h, h]
def increment_age(self):
self.age += 1
self.time_since_update += 1
def predict(self, kf):
"""Propagate the state distribution to the current time step using a
Kalman filter prediction step.
Parameters
----------
kf : kalman_filter.KalmanFilter
The Kalman filter.
"""
self.mean, self.covariance = self.kf.predict(self.mean, self.covariance)
self.age += 1
self.time_since_update += 1
def update_kf(self, bbox, confidence=0.5):
self.updates_wo_assignment = self.updates_wo_assignment + 1
self.mean, self.covariance = self.kf.update(self.mean, self.covariance, bbox, confidence)
tlbr = self.to_tlbr()
x_c = int((tlbr[0] + tlbr[2]) / 2)
y_c = int((tlbr[1] + tlbr[3]) / 2)
self.q.append(('predupdate', (x_c, y_c)))
def update(self, detection, class_id, conf):
"""Perform Kalman filter measurement update step and update the feature
cache.
Parameters
----------
detection : Detection
The associated detection.
"""
self.conf = conf
self.class_id = class_id.int()
self.mean, self.covariance = self.kf.update(self.mean, self.covariance, detection.to_xyah(), detection.confidence)
feature = detection.feature / np.linalg.norm(detection.feature)
smooth_feat = self.ema_alpha * self.features[-1] + (1 - self.ema_alpha) * feature
smooth_feat /= np.linalg.norm(smooth_feat)
self.features = [smooth_feat]
self.hits += 1
self.time_since_update = 0
if self.state == TrackState.Tentative and self.hits >= self._n_init:
self.state = TrackState.Confirmed
tlbr = self.to_tlbr()
x_c = int((tlbr[0] + tlbr[2]) / 2)
y_c = int((tlbr[1] + tlbr[3]) / 2)
self.q.append(('observationupdate', (x_c, y_c)))
def mark_missed(self):
"""Mark this track as missed (no association at the current time step).
"""
if self.state == TrackState.Tentative:
self.state = TrackState.Deleted
elif self.time_since_update > self._max_age:
self.state = TrackState.Deleted
def is_tentative(self):
"""Returns True if this track is tentative (unconfirmed).
"""
return self.state == TrackState.Tentative
def is_confirmed(self):
"""Returns True if this track is confirmed."""
return self.state == TrackState.Confirmed
def is_deleted(self):
"""Returns True if this track is dead and should be deleted."""
return self.state == TrackState.Deleted
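A lifecycle sketch on synthetic data: a freshly initiated track is tentative, and a miss before n_init confirmations deletes it:

import numpy as np

xyah = np.array([320., 240., 0.5, 100.])     # (center x, center y, aspect, height)
trk = Track(xyah, track_id=1, class_id=0, conf=0.9,
            n_init=3, max_age=30, ema_alpha=0.9,
            feature=np.ones(128, dtype=np.float32))
trk.predict(trk.kf)                          # age += 1, time_since_update += 1
print(trk.is_tentative(), trk.to_tlwh())     # True [295. 190.  50. 100.]
trk.mark_missed()                            # tentative + miss -> Deleted
print(trk.is_deleted())                      # True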

View file

@ -0,0 +1,192 @@
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from . import detection
from .track import Track
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : kalman_filter.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
GATING_THRESHOLD = np.sqrt(kalman_filter.chi2inv95[4])
def __init__(self, metric, max_iou_dist=0.9, max_age=30, max_unmatched_preds=7, n_init=3, _lambda=0, ema_alpha=0.9, mc_lambda=0.995):
self.metric = metric
self.max_iou_dist = max_iou_dist
self.max_age = max_age
self.n_init = n_init
self._lambda = _lambda
self.ema_alpha = ema_alpha
self.mc_lambda = mc_lambda
self.max_unmatched_preds = max_unmatched_preds
self.kf = kalman_filter.KalmanFilter()
self.tracks = []
self._next_id = 1
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
for track in self.tracks:
track.predict(self.kf)
def increment_ages(self):
for track in self.tracks:
track.increment_age()
track.mark_missed()
def camera_update(self, previous_img, current_img):
for track in self.tracks:
track.camera_update(previous_img, current_img)
def pred_n_update_all_tracks(self):
"""Perform predictions and updates for all tracks by its own predicted state.
"""
self.predict()
for t in self.tracks:
if self.max_unmatched_preds != 0 and t.updates_wo_assignment < t.max_num_updates_wo_assignment:
bbox = t.to_tlwh()
t.update_kf(detection.to_xyah_ext(bbox))
def update(self, detections, classes, confidences):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
# Run matching cascade.
matches, unmatched_tracks, unmatched_detections = \
self._match(detections)
# Update track set.
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(
detections[detection_idx], classes[detection_idx], confidences[detection_idx])
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
if self.max_unmatched_preds != 0 and self.tracks[track_idx].updates_wo_assignment < self.tracks[track_idx].max_num_updates_wo_assignment:
bbox = self.tracks[track_idx].to_tlwh()
self.tracks[track_idx].update_kf(detection.to_xyah_ext(bbox))
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx], classes[detection_idx].item(), confidences[detection_idx].item())
self.tracks = [t for t in self.tracks if not t.is_deleted()]
# Update distance metric.
active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
features, targets = [], []
for track in self.tracks:
if not track.is_confirmed():
continue
features += track.features
targets += [track.track_id for _ in track.features]
self.metric.partial_fit(np.asarray(features), np.asarray(targets), active_targets)
def _full_cost_metric(self, tracks, dets, track_indices, detection_indices):
"""
This implements the full lambda-based cost-metric. However, in doing so, it disregards
the possibility to gate the position only which is provided by
linear_assignment.gate_cost_matrix(). Instead, I gate by everything.
Note that the Mahalanobis distance is itself an unnormalised metric. Given the cosine
distance being normalised, we employ a quick and dirty normalisation based on the
threshold: that is, we divide the positional-cost by the gating threshold, thus ensuring
that the valid values range 0-1.
Note also that the authors work with the squared distance. I also sqrt this, so that it
is more intuitive in terms of values.
"""
# Compute First the Position-based Cost Matrix
pos_cost = np.empty([len(track_indices), len(detection_indices)])
msrs = np.asarray([dets[i].to_xyah() for i in detection_indices])
for row, track_idx in enumerate(track_indices):
pos_cost[row, :] = np.sqrt(
self.kf.gating_distance(
tracks[track_idx].mean, tracks[track_idx].covariance, msrs, False
)
) / self.GATING_THRESHOLD
pos_gate = pos_cost > 1.0
# Now Compute the Appearance-based Cost Matrix
app_cost = self.metric.distance(
np.array([dets[i].feature for i in detection_indices]),
np.array([tracks[i].track_id for i in track_indices]),
)
app_gate = app_cost > self.metric.matching_threshold
# Now combine and threshold
cost_matrix = self._lambda * pos_cost + (1 - self._lambda) * app_cost
cost_matrix[np.logical_or(pos_gate, app_gate)] = linear_assignment.INFTY_COST
# Return Matrix
return cost_matrix
def _match(self, detections):
def gated_metric(tracks, dets, track_indices, detection_indices):
features = np.array([dets[i].feature for i in detection_indices])
targets = np.array([tracks[i].track_id for i in track_indices])
cost_matrix = self.metric.distance(features, targets)
cost_matrix = linear_assignment.gate_cost_matrix(cost_matrix, tracks, dets, track_indices, detection_indices, self.mc_lambda)
return cost_matrix
# Split track set into confirmed and unconfirmed tracks.
confirmed_tracks = [
i for i, t in enumerate(self.tracks) if t.is_confirmed()]
unconfirmed_tracks = [
i for i, t in enumerate(self.tracks) if not t.is_confirmed()]
# Associate confirmed tracks using appearance features.
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
gated_metric, self.metric.matching_threshold, self.max_age,
self.tracks, detections, confirmed_tracks)
# Associate remaining tracks together with unconfirmed tracks using IOU.
iou_track_candidates = unconfirmed_tracks + [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update == 1]
unmatched_tracks_a = [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update != 1]
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_dist, self.tracks,
detections, iou_track_candidates, unmatched_detections)
matches = matches_a + matches_b
unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
return matches, unmatched_tracks, unmatched_detections
def _initiate_track(self, detection, class_id, conf):
self.tracks.append(Track(
detection.to_xyah(), self._next_id, class_id, conf, self.n_init, self.max_age, self.ema_alpha,
detection.feature))
self._next_id += 1
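The tracker is normally driven through StrongSORT.update() below; a minimal manual wiring looks like this (construction only, no detections yet):

import numpy as np
from trackers.strongsort.sort.nn_matching import NearestNeighborDistanceMetric

metric = NearestNeighborDistanceMetric('cosine', matching_threshold=0.2, budget=100)
tracker = Tracker(metric, max_iou_dist=0.7, max_age=30,
                  max_unmatched_preds=7, n_init=3)
tracker.predict()            # no-op while the track list is empty
print(len(tracker.tracks))   # 0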

View file

@ -0,0 +1,151 @@
import numpy as np
import torch
import sys
import cv2
import gdown
from os.path import exists as file_exists, join
import torchvision.transforms as transforms
from trackers.strongsort.sort.nn_matching import NearestNeighborDistanceMetric
from trackers.strongsort.sort.detection import Detection
from trackers.strongsort.sort.tracker import Tracker
from trackers.strongsort.reid_multibackend import ReIDDetectMultiBackend
from ultralytics.yolo.utils.ops import xyxy2xywh
class StrongSORT(object):
def __init__(self,
model_weights,
device,
fp16,
max_dist=0.2,
max_iou_dist=0.7,
max_age=70,
max_unmatched_preds=7,
n_init=3,
nn_budget=100,
mc_lambda=0.995,
ema_alpha=0.9
):
self.model = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16)
self.max_dist = max_dist
metric = NearestNeighborDistanceMetric(
"cosine", self.max_dist, nn_budget)
self.tracker = Tracker(
metric, max_iou_dist=max_iou_dist, max_age=max_age, n_init=n_init, max_unmatched_preds=max_unmatched_preds, mc_lambda=mc_lambda, ema_alpha=ema_alpha)
def update(self, dets, ori_img):
xyxys = dets[:, 0:4]
confs = dets[:, 4]
clss = dets[:, 5]
classes = clss.numpy()
xywhs = xyxy2xywh(xyxys.numpy())
confs = confs.numpy()
self.height, self.width = ori_img.shape[:2]
# generate detections
features = self._get_features(xywhs, ori_img)
bbox_tlwh = self._xywh_to_tlwh(xywhs)
detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(
confs)]
# run non-maximum suppression
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
# update tracker
self.tracker.predict()
self.tracker.update(detections, clss, confs)
# output bbox identities
outputs = []
for track in self.tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
box = track.to_tlwh()
x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
track_id = track.track_id
class_id = track.class_id
conf = track.conf
queue = track.q
outputs.append(np.array([x1, y1, x2, y2, track_id, class_id, conf, queue], dtype=object))
if len(outputs) > 0:
outputs = np.stack(outputs, axis=0)
return outputs
"""
TODO:
Convert bbox from xc_yc_w_h to xtl_ytl_w_h
Thanks JieChen91@github.com for reporting this bug!
"""
@staticmethod
def _xywh_to_tlwh(bbox_xywh):
if isinstance(bbox_xywh, np.ndarray):
bbox_tlwh = bbox_xywh.copy()
elif isinstance(bbox_xywh, torch.Tensor):
bbox_tlwh = bbox_xywh.clone()
bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
return bbox_tlwh
def _xywh_to_xyxy(self, bbox_xywh):
x, y, w, h = bbox_xywh
x1 = max(int(x - w / 2), 0)
x2 = min(int(x + w / 2), self.width - 1)
y1 = max(int(y - h / 2), 0)
y2 = min(int(y + h / 2), self.height - 1)
return x1, y1, x2, y2
def _tlwh_to_xyxy(self, bbox_tlwh):
"""
TODO:
Convert bbox from xtl_ytl_w_h to xc_yc_w_h
Thanks JieChen91@github.com for reporting this bug!
"""
x, y, w, h = bbox_tlwh
x1 = max(int(x), 0)
x2 = min(int(x+w), self.width - 1)
y1 = max(int(y), 0)
y2 = min(int(y+h), self.height - 1)
return x1, y1, x2, y2
def increment_ages(self):
self.tracker.increment_ages()
def _xyxy_to_tlwh(self, bbox_xyxy):
x1, y1, x2, y2 = bbox_xyxy
t = x1
l = y1
w = int(x2 - x1)
h = int(y2 - y1)
return t, l, w, h
def _get_features(self, bbox_xywh, ori_img):
im_crops = []
for box in bbox_xywh:
x1, y1, x2, y2 = self._xywh_to_xyxy(box)
im = ori_img[y1:y2, x1:x2]
im_crops.append(im)
if im_crops:
features = self.model(im_crops)
else:
features = np.array([])
return features
def trajectory(self, im0, q, color):
# Draw the track's trajectory points on the image with cv2
for i, p in enumerate(q):
thickness = int(np.sqrt(float(i + 1)) * 1.5)
if p[0] == 'observationupdate':
cv2.circle(im0, p[1], 2, color=color, thickness=thickness)
else:
cv2.circle(im0, p[1], 2, color=(255,255,255), thickness=thickness)
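An end-to-end sketch under stated assumptions: the weight file exists locally (or has a known download URL), and dets is an (N, 6) CPU tensor of [x1, y1, x2, y2, conf, cls] rows from any detector:

import numpy as np
import torch
from pathlib import Path

tracker = StrongSORT(model_weights=Path('osnet_x0_25_msmt17.pt'),  # assumed weights
                     device=torch.device('cpu'), fp16=False)
frame = np.zeros((480, 640, 3), dtype=np.uint8)                    # dummy BGR frame
dets = torch.tensor([[100., 100., 180., 300., 0.9, 0.]])
outputs = tracker.update(dets, frame)
# rows are [x1, y1, x2, y2, track_id, class_id, conf, trajectory_queue]; note that
# brand-new tracks stay tentative for n_init frames, so early calls return [].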

View file

@ -0,0 +1,13 @@
from os import environ
def assert_in(file, files_to_check):
if file not in files_to_check:
raise AssertionError("{} does not exist in the list".format(str(file)))
return True
def assert_in_env(check_list: list):
for item in check_list:
assert_in(item, environ.keys())
return True

View file

@ -0,0 +1,36 @@
import numpy as np
import cv2
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
def compute_color_for_labels(label):
"""
Simple function that adds fixed color depending on the class
"""
color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
return tuple(color)
def draw_boxes(img, bbox, identities=None, offset=(0,0)):
for i,box in enumerate(bbox):
x1,y1,x2,y2 = [int(i) for i in box]
x1 += offset[0]
x2 += offset[0]
y1 += offset[1]
y2 += offset[1]
# box text and bar
id = int(identities[i]) if identities is not None else 0
color = compute_color_for_labels(id)
label = '{:d}'.format(id)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2 , 2)[0]
cv2.rectangle(img,(x1, y1),(x2,y2),color,3)
cv2.rectangle(img,(x1, y1),(x1+t_size[0]+3,y1+t_size[1]+4), color,-1)
cv2.putText(img,label,(x1,y1+t_size[1]+4), cv2.FONT_HERSHEY_PLAIN, 2, [255,255,255], 2)
return img
if __name__ == '__main__':
for i in range(82):
print(compute_color_for_labels(i))

View file

@ -0,0 +1,103 @@
import os
import numpy as np
import copy
import motmetrics as mm
mm.lap.default_solver = 'lap'
from utils.io import read_results, unzip_objs
class Evaluator(object):
def __init__(self, data_root, seq_name, data_type):
self.data_root = data_root
self.seq_name = seq_name
self.data_type = data_type
self.load_annotations()
self.reset_accumulator()
def load_annotations(self):
assert self.data_type == 'mot'
gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)
def reset_accumulator(self):
self.acc = mm.MOTAccumulator(auto_id=True)
def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
# results
trk_tlwhs = np.copy(trk_tlwhs)
trk_ids = np.copy(trk_ids)
# gts
gt_objs = self.gt_frame_dict.get(frame_id, [])
gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
# ignore boxes
ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
ignore_tlwhs = unzip_objs(ignore_objs)[0]
# remove ignored results
keep = np.ones(len(trk_tlwhs), dtype=bool)
iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
if len(iou_distance) > 0:
match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
match_ious = iou_distance[match_is, match_js]
match_js = np.asarray(match_js, dtype=int)
match_js = match_js[np.logical_not(np.isnan(match_ious))]
keep[match_js] = False
trk_tlwhs = trk_tlwhs[keep]
trk_ids = trk_ids[keep]
# get distance matrix
iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
# acc
self.acc.update(gt_ids, trk_ids, iou_distance)
if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics
else:
events = None
return events
def eval_file(self, filename):
self.reset_accumulator()
result_frame_dict = read_results(filename, self.data_type, is_gt=False)
frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
for frame_id in frames:
trk_objs = result_frame_dict.get(frame_id, [])
trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
return self.acc
@staticmethod
def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
names = copy.deepcopy(names)
if metrics is None:
metrics = mm.metrics.motchallenge_metrics
metrics = copy.deepcopy(metrics)
mh = mm.metrics.create()
summary = mh.compute_many(
accs,
metrics=metrics,
names=names,
generate_overall=True
)
return summary
@staticmethod
def save_summary(summary, filename):
import pandas as pd
with pd.ExcelWriter(filename) as writer:
    summary.to_excel(writer)  # ExcelWriter.save() was removed in pandas 2.0
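A hedged evaluation sketch; the MOT ground-truth layout and the result file path are assumptions for illustration:

evaluator = Evaluator(data_root='MOT16/train', seq_name='MOT16-02', data_type='mot')
acc = evaluator.eval_file('results/MOT16-02.txt')      # tracker output in MOT format
summary = Evaluator.get_summary([acc], names=['MOT16-02'])
print(summary)                                          # MOTA, ID switches, IDF1, ...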

View file

@ -0,0 +1,133 @@
import os
from typing import Dict
import numpy as np
# from utils.log import get_logger
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
# def write_results(filename, results_dict: Dict, data_type: str):
# if not filename:
# return
# path = os.path.dirname(filename)
# if not os.path.exists(path):
# os.makedirs(path)
# if data_type in ('mot', 'mcmot', 'lab'):
# save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
# elif data_type == 'kitti':
# save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
# else:
# raise ValueError(data_type)
# with open(filename, 'w') as f:
# for frame_id, frame_data in results_dict.items():
# if data_type == 'kitti':
# frame_id -= 1
# for tlwh, track_id in frame_data:
# if track_id < 0:
# continue
# x1, y1, w, h = tlwh
# x2, y2 = x1 + w, y1 + h
# line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
# f.write(line)
# logger.info('Save results to {}'.format(filename))
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
if data_type in ('mot', 'lab'):
read_fun = read_mot_results
else:
raise ValueError('Unknown data type: {}'.format(data_type))
return read_fun(filename, is_gt, is_ignore)
"""
labels={'ped', ... % 1
'person_on_vhcl', ... % 2
'car', ... % 3
'bicycle', ... % 4
'mbike', ... % 5
'non_mot_vhcl', ... % 6
'static_person', ... % 7
'distractor', ... % 8
'occluder', ... % 9
'occluder_on_grnd', ... %10
'occluder_full', ... % 11
'reflection', ... % 12
'crowd' ... % 13
};
"""
def read_mot_results(filename, is_gt, is_ignore):
valid_labels = {1}
ignore_labels = {2, 7, 8, 12}
results_dict = dict()
if os.path.isfile(filename):
with open(filename, 'r') as f:
for line in f.readlines():
linelist = line.split(',')
if len(linelist) < 7:
continue
fid = int(linelist[0])
if fid < 1:
continue
results_dict.setdefault(fid, list())
if is_gt:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
mark = int(float(linelist[6]))
if mark == 0 or label not in valid_labels:
continue
score = 1
elif is_ignore:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
vis_ratio = float(linelist[8])
if label not in ignore_labels and vis_ratio >= 0:
continue
else:
continue
score = 1
else:
score = float(linelist[6])
tlwh = tuple(map(float, linelist[2:6]))
target_id = int(linelist[1])
results_dict[fid].append((tlwh, target_id, score))
return results_dict
def unzip_objs(objs):
if len(objs) > 0:
tlwhs, ids, scores = zip(*objs)
else:
tlwhs, ids, scores = [], [], []
tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
return tlwhs, ids, scores
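A small round-trip sketch using write_results, read_results and unzip_objs from this file; the path and boxes are illustrative. Note that the MOT save format writes -1 in the score column, so scores read back as -1.0 for non-gt files.

results = [
    (1, [(10.0, 20.0, 50.0, 80.0)], [1]),  # frame 1: one track with id 1, tlwh box
    (2, [(12.0, 22.0, 50.0, 80.0)], [1]),
]
write_results('demo_results.txt', results, data_type='mot')

loaded = read_results('demo_results.txt', data_type='mot', is_gt=False)
print(loaded[1])  # [((10.0, 20.0, 50.0, 80.0), 1, -1.0)]
tlwhs, ids, scores = unzip_objs(loaded[1])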

View file

@ -0,0 +1,383 @@
"""
References:
https://medium.com/analytics-vidhya/creating-a-custom-logging-mechanism-for-real-time-object-detection-using-tdd-4ca2cfcd0a2f
"""
import json
from os import makedirs
from os.path import exists, join
from datetime import datetime
class JsonMeta(object):
HOURS = 3
MINUTES = 59
SECONDS = 59
PATH_TO_SAVE = 'LOGS'
DEFAULT_FILE_NAME = 'remaining'
class BaseJsonLogger(object):
"""
Base class that serialises its own __dict__; attribute values that are
lists or expose a dic() method are converted recursively.
"""
def dic(self):
# returns dicts of objects
out = {}
for k, v in self.__dict__.items():
if hasattr(v, 'dic'):
out[k] = v.dic()
elif isinstance(v, list):
out[k] = self.list(v)
else:
out[k] = v
return out
@staticmethod
def list(values):
# applies the dic method on items in the list
return [v.dic() if hasattr(v, 'dic') else v for v in values]
class Label(BaseJsonLogger):
"""
For each bounding box there are various categories with confidences. Label class keeps track of that information.
"""
def __init__(self, category: str, confidence: float):
self.category = category
self.confidence = confidence
class Bbox(BaseJsonLogger):
"""
Stores the information of a single bounding box for the json output.
Attributes:
labels (list): List of label module.
top (int):
left (int):
width (int):
height (int):
Args:
bbox_id (int):
top (int):
left (int):
width (int):
height (int):
References:
Check Label module for better understanding.
"""
def __init__(self, bbox_id, top, left, width, height):
self.labels = []
self.bbox_id = bbox_id
self.top = top
self.left = left
self.width = width
self.height = height
def add_label(self, category, confidence):
# appends a Label; the top_k quota is enforced by the caller (add_label_to_bbox)
self.labels.append(Label(category, confidence))
def labels_full(self, value):
return len(self.labels) >= value  # >= guards against overshoot if labels are ever appended directly
class Frame(BaseJsonLogger):
"""
Stores the information of a single frame for the json output.
Attributes:
timestamp (float): The elapsed time of captured frame
frame_id (int): The frame number of the captured video
bboxes (list of Bbox objects): Stores the list of bbox objects.
References:
Check Bbox class for better information
Args:
timestamp (float):
frame_id (int):
"""
def __init__(self, frame_id: int, timestamp: float = None):
self.frame_id = frame_id
self.timestamp = timestamp
self.bboxes = []
def add_bbox(self, bbox_id: int, top: int, left: int, width: int, height: int):
bboxes_ids = [bbox.bbox_id for bbox in self.bboxes]
if bbox_id not in bboxes_ids:
self.bboxes.append(Bbox(bbox_id, top, left, width, height))
else:
raise ValueError("Frame with id: {} already has a Bbox with id: {}".format(self.frame_id, bbox_id))
def add_label_to_bbox(self, bbox_id: int, category: str, confidence: float):
bboxes = {bbox.bbox_id: bbox for bbox in self.bboxes}  # note: Bbox stores its id as bbox_id, not id
if bbox_id in bboxes.keys():
res = bboxes.get(bbox_id)
res.add_label(category, confidence)
else:
raise ValueError('the bbox with id: {} does not exist!'.format(bbox_id))
class BboxToJsonLogger(BaseJsonLogger):
"""
This module is designed to automate the task of logging jsons. The sample
json below illustrates the structure of the output file.
Example:
{
"video_details": {
"frame_width": 1920,
"frame_height": 1080,
"frame_rate": 20,
"video_name": "/home/gpu/codes/MSD/pedestrian_2/project/public/camera1.avi"
},
"frames": [
{
"frame_id": 329,
"timestamp": 3365.1254
"bboxes": [
{
"labels": [
{
"category": "pedestrian",
"confidence": 0.9
}
],
"bbox_id": 0,
"top": 1257,
"left": 138,
"width": 68,
"height": 109
}
]
}
]
}
Attributes:
frames (dict): maps each frame_id to its Frame object.
video_details (dict): information about the video file.
top_k_labels (int): maximum number of labels allowed per bbox.
start_time (datetime object): used to schedule the json output by time.
Args:
top_k_labels (int): maximum number of labels allowed per bbox.
"""
def __init__(self, top_k_labels: int = 1):
self.frames = {}
self.video_details = dict(frame_width=None, frame_height=None, frame_rate=None,
video_name=None)
self.top_k_labels = top_k_labels
self.start_time = datetime.now()
def set_top_k(self, value):
self.top_k_labels = value
def frame_exists(self, frame_id: int) -> bool:
"""
Args:
frame_id (int):
Returns:
bool: true if frame_id is recognized
"""
return frame_id in self.frames.keys()
def add_frame(self, frame_id: int, timestamp: float = None) -> None:
"""
Args:
frame_id (int):
timestamp (float): opencv captured frame time property
Raises:
ValueError: if frame_id already exists in the frames attribute
Returns:
None
"""
if not self.frame_exists(frame_id):
self.frames[frame_id] = Frame(frame_id, timestamp)
else:
raise ValueError("Frame id: {} already exists".format(frame_id))
def bbox_exists(self, frame_id: int, bbox_id: int) -> bool:
"""
Args:
frame_id:
bbox_id:
Returns:
bool: True if the bbox exists in the frame's bbox list
"""
bboxes = []
if self.frame_exists(frame_id=frame_id):
bboxes = [bbox.bbox_id for bbox in self.frames[frame_id].bboxes]
return bbox_id in bboxes
def find_bbox(self, frame_id: int, bbox_id: int):
"""
Args:
frame_id:
bbox_id:
Returns:
Bbox: the bbox object with the given bbox_id.
Raises:
ValueError: if bbox_id does not exist in the bbox list of specific frame.
"""
if not self.bbox_exists(frame_id, bbox_id):
raise ValueError("frame with id: {} does not contain bbox with id: {}".format(frame_id, bbox_id))
bboxes = {bbox.bbox_id: bbox for bbox in self.frames[frame_id].bboxes}
return bboxes.get(bbox_id)
def add_bbox_to_frame(self, frame_id: int, bbox_id: int, top: int, left: int, width: int, height: int) -> None:
"""
Args:
frame_id (int):
bbox_id (int):
top (int):
left (int):
width (int):
height (int):
Returns:
None
Raises:
ValueError: if bbox_id already exists in the frame with the given frame_id
ValueError: if frame_id does not exist in frames attribute
"""
if self.frame_exists(frame_id):
frame = self.frames[frame_id]
if not self.bbox_exists(frame_id, bbox_id):
frame.add_bbox(bbox_id, top, left, width, height)
else:
raise ValueError(
"frame with frame_id: {} already contains the bbox with id: {} ".format(frame_id, bbox_id))
else:
raise ValueError("frame with frame_id: {} does not exist".format(frame_id))
def add_label_to_bbox(self, frame_id: int, bbox_id: int, category: str, confidence: float):
"""
Args:
frame_id:
bbox_id:
category:
confidence: the confidence value returned from yolo detection
Returns:
None
Raises:
ValueError: if the labels quota (top_k_labels) is exceeded.
"""
bbox = self.find_bbox(frame_id, bbox_id)
if not bbox.labels_full(self.top_k_labels):
bbox.add_label(category, confidence)
else:
raise ValueError("labels in frame_id: {}, bbox_id: {} is fulled".format(frame_id, bbox_id))
def add_video_details(self, frame_width: int = None, frame_height: int = None, frame_rate: int = None,
video_name: str = None):
self.video_details['frame_width'] = frame_width
self.video_details['frame_height'] = frame_height
self.video_details['frame_rate'] = frame_rate
self.video_details['video_name'] = video_name
def output(self):
output = {'video_details': self.video_details}
result = list(self.frames.values())
output['frames'] = [item.dic() for item in result]
return output
def json_output(self, output_name):
"""
Args:
output_name:
Returns:
None
Notes:
It creates the json output with `output_name` name.
"""
if not output_name.endswith('.json'):
output_name += '.json'
with open(output_name, 'w') as file:
json.dump(self.output(), file)  # the with-block closes the file; no explicit close() needed
def set_start(self):
self.start_time = datetime.now()
def schedule_output_by_time(self, output_dir=JsonMeta.PATH_TO_SAVE, hours: int = 0, minutes: int = 0,
seconds: int = 60) -> None:
"""
Notes:
Creates the output folder if needed, then periodically stores the jsons there.
Args:
output_dir (str): the directory where output files will be stored
hours (int):
minutes (int):
seconds (int):
Returns:
None
"""
end = datetime.now()
interval = 0
interval += abs(min([hours, JsonMeta.HOURS]) * 3600)
interval += abs(min([minutes, JsonMeta.MINUTES]) * 60)
interval += abs(min([seconds, JsonMeta.SECONDS]))
diff = (end - self.start_time).seconds
if diff > interval:
output_name = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '.json'
if not exists(output_dir):
makedirs(output_dir)
output = join(output_dir, output_name)
self.json_output(output_name=output)
self.frames = {}
self.start_time = datetime.now()
def schedule_output_by_frames(self, frames_quota, frame_counter, output_dir=JsonMeta.PATH_TO_SAVE):
"""
Placeholder: intended to save output whenever frame_counter reaches frames_quota.
:param frames_quota: number of frames per output file
:param frame_counter: current frame index
:param output_dir: directory for the output files
:return: None (not implemented yet)
"""
pass
def flush(self, output_dir):
"""
Notes:
Dumps the remaining frames to json on demand,
e.g. when the OpenCV capture loop exits.
Args:
output_dir:
Returns:
None
"""
filename = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '-remaining.json'
output = join(output_dir, filename)
self.json_output(output_name=output)
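A minimal end-to-end sketch of the logger defined above; all ids, sizes and file names are illustrative.

logger = BboxToJsonLogger(top_k_labels=1)
logger.add_video_details(frame_width=1920, frame_height=1080, frame_rate=20,
                         video_name='camera1.avi')
logger.add_frame(frame_id=1, timestamp=0.05)
logger.add_bbox_to_frame(frame_id=1, bbox_id=0, top=100, left=50, width=60, height=120)
logger.add_label_to_bbox(frame_id=1, bbox_id=0, category='pedestrian', confidence=0.93)
logger.json_output('demo_log')  # writes demo_log.json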

View file

@ -0,0 +1,17 @@
import logging
def get_logger(name='root'):
formatter = logging.Formatter(
# fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
if not logger.handlers:  # avoid duplicate handlers (and duplicate log lines) on repeated calls
logger.addHandler(handler)
return logger
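A quick usage sketch; the logger name is illustrative.

logger = get_logger('strongsort')
logger.info('tracker initialised')  # prints e.g. 2025-01-01 12:00:00 [INFO]: tracker initialised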

View file

@ -0,0 +1,41 @@
import os
import yaml
from easydict import EasyDict as edict
class YamlParser(edict):
"""
A yaml parser based on EasyDict.
"""
def __init__(self, cfg_dict=None, config_file=None):
if cfg_dict is None:
cfg_dict = {}
if config_file is not None:
assert os.path.isfile(config_file)
with open(config_file, 'r') as fo:
yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
cfg_dict.update(yaml_)
super(YamlParser, self).__init__(cfg_dict)
def merge_from_file(self, config_file):
with open(config_file, 'r') as fo:
yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
self.update(yaml_)
def merge_from_dict(self, config_dict):
self.update(config_dict)
def get_config(config_file=None):
return YamlParser(config_file=config_file)
if __name__ == "__main__":
cfg = YamlParser(config_file="../configs/yolov3.yaml")
cfg.merge_from_file("../configs/strong_sort.yaml")
import ipdb
ipdb.set_trace()
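A hedged usage sketch, assuming a strong_sort.yaml with a top-level strongsort section as added elsewhere in this commit; the path is illustrative. Note that merge_from_dict is a shallow dict.update, so nested sections are replaced, not deep-merged.

cfg = get_config(config_file='configs/strong_sort.yaml')
print(cfg.strongsort.max_age)              # EasyDict exposes keys as attributes
cfg.merge_from_dict({'device': 'cuda:0'})  # top-level update only
print(cfg.device)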

View file

@ -0,0 +1,39 @@
from functools import wraps
from time import time
def is_video(ext: str):
"""
Returns True if ext ends with one of the
allowed video file extensions.
Args:
ext:
Returns:
"""
allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp')
return any((ext.endswith(x) for x in allowed_exts))
def tik_tok(func):
"""
Decorator that reports wall-clock time and FPS for each call of func.
Args:
func:
Returns:
"""
@wraps(func)
def _time_it(*args, **kwargs):
start = time()
try:
return func(*args, **kwargs)
finally:
end_ = time()
print("time: {:.03f}s, fps: {:.03f}".format(end_ - start, 1 / (end_ - start)))
return _time_it
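A short usage sketch of the two helpers above; the decorated function body is an arbitrary stand-in workload.

print(is_video('clip.mp4'))   # True
print(is_video('notes.txt'))  # False

@tik_tok
def process_frame():
    return sum(range(1_000_000))  # stand-in for real per-frame work

process_frame()  # the decorator prints the elapsed time and fps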