From aa88140d02045cd1c23b20c9104057d92c6b4436 Mon Sep 17 00:00:00 2001 From: Zhongdao Date: Wed, 7 Jul 2021 18:04:52 +0800 Subject: [PATCH] Clean up --- eval/poseval/setup.py | 24 - tracker/sot/lib/models/dcn/__init__.py | 12 - tracker/sot/lib/models/dcn/deform_conv.py | 337 ------- tracker/sot/lib/models/dcn/deform_pool.py | 245 ----- .../lib/models/dcn/src/deform_conv_cuda.cpp | 693 -------------- .../models/dcn/src/deform_conv_cuda_kernel.cu | 865 ------------------ .../lib/models/dcn/src/deform_pool_cuda.cpp | 81 -- .../models/dcn/src/deform_pool_cuda_kernel.cu | 363 -------- tracker/sot/lib/models/mask.py | 342 ------- tracker/sot/lib/models/ocean.py | 354 ------- tracker/sot/lib/models/oceanTRT.py | 61 -- tracker/sot/lib/models/oceanplus.py | 106 --- tracker/sot/lib/models/siamfc.py | 7 - 13 files changed, 3490 deletions(-) delete mode 100644 eval/poseval/setup.py delete mode 100644 tracker/sot/lib/models/dcn/__init__.py delete mode 100644 tracker/sot/lib/models/dcn/deform_conv.py delete mode 100644 tracker/sot/lib/models/dcn/deform_pool.py delete mode 100644 tracker/sot/lib/models/dcn/src/deform_conv_cuda.cpp delete mode 100644 tracker/sot/lib/models/dcn/src/deform_conv_cuda_kernel.cu delete mode 100644 tracker/sot/lib/models/dcn/src/deform_pool_cuda.cpp delete mode 100644 tracker/sot/lib/models/dcn/src/deform_pool_cuda_kernel.cu delete mode 100644 tracker/sot/lib/models/mask.py delete mode 100644 tracker/sot/lib/models/ocean.py delete mode 100644 tracker/sot/lib/models/oceanTRT.py delete mode 100644 tracker/sot/lib/models/oceanplus.py diff --git a/eval/poseval/setup.py b/eval/poseval/setup.py deleted file mode 100644 index e31df7d..0000000 --- a/eval/poseval/setup.py +++ /dev/null @@ -1,24 +0,0 @@ -from setuptools import setup, find_packages - - -setup( - name='poseval', - version='0.1.0', - packages=find_packages(), - description='poseval', - long_description=open('README.md', encoding='utf-8').read(), - long_description_content_type='text/markdown', - - install_requires=[ - 'click', - 'motmetrics>=1.2', - 'shapely', - 'tqdm', - ], - extras_require={ - 'dev': [ - 'pylint', - 'pytest', - ], - }, -) diff --git a/tracker/sot/lib/models/dcn/__init__.py b/tracker/sot/lib/models/dcn/__init__.py deleted file mode 100644 index 29d75be..0000000 --- a/tracker/sot/lib/models/dcn/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .deform_conv import (DeformConv, DeformConvPack, ModulatedDeformConv, - ModulatedDeformConvPack, deform_conv, - modulated_deform_conv) -from .deform_pool import (DeformRoIPooling, DeformRoIPoolingPack, - ModulatedDeformRoIPoolingPack, deform_roi_pooling) - -__all__ = [ - 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', - 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', - 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv', - 'deform_roi_pooling' -] diff --git a/tracker/sot/lib/models/dcn/deform_conv.py b/tracker/sot/lib/models/dcn/deform_conv.py deleted file mode 100644 index c4b8701..0000000 --- a/tracker/sot/lib/models/dcn/deform_conv.py +++ /dev/null @@ -1,337 +0,0 @@ -import math - -import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from . 
import deform_conv_cuda - - -class DeformConvFunction(Function): - - @staticmethod - def forward(ctx, - input, - offset, - weight, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - im2col_step=64): - if input is not None and input.dim() != 4: - raise ValueError( - "Expected 4D tensor as input, got {}D tensor instead.".format( - input.dim())) - ctx.stride = _pair(stride) - ctx.padding = _pair(padding) - ctx.dilation = _pair(dilation) - ctx.groups = groups - ctx.deformable_groups = deformable_groups - ctx.im2col_step = im2col_step - - ctx.save_for_backward(input, offset, weight) - - output = input.new_empty( - DeformConvFunction._output_size(input, weight, ctx.padding, - ctx.dilation, ctx.stride)) - - ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones - - if not input.is_cuda: - raise NotImplementedError - else: - cur_im2col_step = min(ctx.im2col_step, input.shape[0]) - assert (input.shape[0] % - cur_im2col_step) == 0, 'im2col step must divide batchsize' - deform_conv_cuda.deform_conv_forward_cuda( - input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], - weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], - ctx.padding[1], ctx.padding[0], ctx.dilation[1], - ctx.dilation[0], ctx.groups, ctx.deformable_groups, - cur_im2col_step) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, offset, weight = ctx.saved_tensors - - grad_input = grad_offset = grad_weight = None - - if not grad_output.is_cuda: - raise NotImplementedError - else: - cur_im2col_step = min(ctx.im2col_step, input.shape[0]) - assert (input.shape[0] % - cur_im2col_step) == 0, 'im2col step must divide batchsize' - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - deform_conv_cuda.deform_conv_backward_input_cuda( - input, offset, grad_output, grad_input, - grad_offset, weight, ctx.bufs_[0], weight.size(3), - weight.size(2), ctx.stride[1], ctx.stride[0], - ctx.padding[1], ctx.padding[0], ctx.dilation[1], - ctx.dilation[0], ctx.groups, ctx.deformable_groups, - cur_im2col_step) - - if ctx.needs_input_grad[2]: - grad_weight = torch.zeros_like(weight) - deform_conv_cuda.deform_conv_backward_parameters_cuda( - input, offset, grad_output, - grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), - weight.size(2), ctx.stride[1], ctx.stride[0], - ctx.padding[1], ctx.padding[0], ctx.dilation[1], - ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, - cur_im2col_step) - - return (grad_input, grad_offset, grad_weight, None, None, None, None, - None) - - @staticmethod - def _output_size(input, weight, padding, dilation, stride): - channels = weight.size(0) - output_size = (input.size(0), channels) - for d in range(input.dim() - 2): - in_size = input.size(d + 2) - pad = padding[d] - kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 - stride_ = stride[d] - output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) - if not all(map(lambda s: s > 0, output_size)): - raise ValueError( - "convolution input is too small (output would be {})".format( - 'x'.join(map(str, output_size)))) - return output_size - - -class ModulatedDeformConvFunction(Function): - - @staticmethod - def forward(ctx, - input, - offset, - mask, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1): - ctx.stride = stride - ctx.padding = padding - ctx.dilation = dilation - ctx.groups = groups - ctx.deformable_groups = deformable_groups 
- ctx.with_bias = bias is not None - if not ctx.with_bias: - bias = input.new_empty(1) # fake tensor - if not input.is_cuda: - raise NotImplementedError - if weight.requires_grad or mask.requires_grad or offset.requires_grad \ - or input.requires_grad: - ctx.save_for_backward(input, offset, mask, weight, bias) - output = input.new_empty( - ModulatedDeformConvFunction._infer_shape(ctx, input, weight)) - ctx._bufs = [input.new_empty(0), input.new_empty(0)] - deform_conv_cuda.modulated_deform_conv_cuda_forward( - input, weight, bias, ctx._bufs[0], offset, mask, output, - ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride, - ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, - ctx.groups, ctx.deformable_groups, ctx.with_bias) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - if not grad_output.is_cuda: - raise NotImplementedError - input, offset, mask, weight, bias = ctx.saved_tensors - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - grad_mask = torch.zeros_like(mask) - grad_weight = torch.zeros_like(weight) - grad_bias = torch.zeros_like(bias) - deform_conv_cuda.modulated_deform_conv_cuda_backward( - input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], - grad_input, grad_weight, grad_bias, grad_offset, grad_mask, - grad_output, weight.shape[2], weight.shape[3], ctx.stride, - ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, - ctx.groups, ctx.deformable_groups, ctx.with_bias) - if not ctx.with_bias: - grad_bias = None - - return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, - None, None, None, None, None) - - @staticmethod - def _infer_shape(ctx, input, weight): - n = input.size(0) - channels_out = weight.size(0) - height, width = input.shape[2:4] - kernel_h, kernel_w = weight.shape[2:4] - height_out = (height + 2 * ctx.padding - - (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1 - width_out = (width + 2 * ctx.padding - - (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1 - return n, channels_out, height_out, width_out - - -deform_conv = DeformConvFunction.apply -modulated_deform_conv = ModulatedDeformConvFunction.apply - - -class DeformConv(nn.Module): - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - bias=False): - super(DeformConv, self).__init__() - - assert not bias - assert in_channels % groups == 0, \ - 'in_channels {} cannot be divisible by groups {}'.format( - in_channels, groups) - assert out_channels % groups == 0, \ - 'out_channels {} cannot be divisible by groups {}'.format( - out_channels, groups) - - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride) - self.padding = _pair(padding) - self.dilation = _pair(dilation) - self.groups = groups - self.deformable_groups = deformable_groups - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // self.groups, - *self.kernel_size)) - - self.reset_parameters() - - def reset_parameters(self): - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. 
/ math.sqrt(n) - self.weight.data.uniform_(-stdv, stdv) - - def forward(self, x, offset): - return deform_conv(x, offset, self.weight, self.stride, self.padding, - self.dilation, self.groups, self.deformable_groups) - - -class DeformConvPack(DeformConv): - - def __init__(self, *args, **kwargs): - super(DeformConvPack, self).__init__(*args, **kwargs) - - self.conv_offset = nn.Conv2d( - self.in_channels, - self.deformable_groups * 2 * self.kernel_size[0] * - self.kernel_size[1], - kernel_size=self.kernel_size, - stride=_pair(self.stride), - padding=_pair(self.padding), - bias=True) - self.init_offset() - - def init_offset(self): - self.conv_offset.weight.data.zero_() - self.conv_offset.bias.data.zero_() - - def forward(self, x): - offset = self.conv_offset(x) - return deform_conv(x, offset, self.weight, self.stride, self.padding, - self.dilation, self.groups, self.deformable_groups) - - -class ModulatedDeformConv(nn.Module): - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - bias=True): - super(ModulatedDeformConv, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = stride - self.padding = padding - self.dilation = dilation - self.groups = groups - self.deformable_groups = deformable_groups - self.with_bias = bias - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // groups, - *self.kernel_size)) - if bias: - self.bias = nn.Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - self.reset_parameters() - - def reset_parameters(self): - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. / math.sqrt(n) - self.weight.data.uniform_(-stdv, stdv) - if self.bias is not None: - self.bias.data.zero_() - - def forward(self, x, offset, mask): - return modulated_deform_conv(x, offset, mask, self.weight, self.bias, - self.stride, self.padding, self.dilation, - self.groups, self.deformable_groups) - - -class ModulatedDeformConvPack(ModulatedDeformConv): - - def __init__(self, *args, **kwargs): - super(ModulatedDeformConvPack, self).__init__(*args, **kwargs) - - self.conv_offset_mask = nn.Conv2d( - self.in_channels, - self.deformable_groups * 3 * self.kernel_size[0] * - self.kernel_size[1], - kernel_size=self.kernel_size, - stride=_pair(self.stride), - padding=_pair(self.padding), - bias=True) - self.init_offset() - - def init_offset(self): - self.conv_offset_mask.weight.data.zero_() - self.conv_offset_mask.bias.data.zero_() - - def forward(self, x): - out = self.conv_offset_mask(x) - o1, o2, mask = torch.chunk(out, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - return modulated_deform_conv(x, offset, mask, self.weight, self.bias, - self.stride, self.padding, self.dilation, - self.groups, self.deformable_groups) diff --git a/tracker/sot/lib/models/dcn/deform_pool.py b/tracker/sot/lib/models/dcn/deform_pool.py deleted file mode 100644 index 14e6f4f..0000000 --- a/tracker/sot/lib/models/dcn/deform_pool.py +++ /dev/null @@ -1,245 +0,0 @@ -import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable - -from . 
import deform_pool_cuda - - -class DeformRoIPoolingFunction(Function): - - @staticmethod - def forward(ctx, - data, - rois, - offset, - spatial_scale, - out_size, - out_channels, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0): - ctx.spatial_scale = spatial_scale - ctx.out_size = out_size - ctx.out_channels = out_channels - ctx.no_trans = no_trans - ctx.group_size = group_size - ctx.part_size = out_size if part_size is None else part_size - ctx.sample_per_part = sample_per_part - ctx.trans_std = trans_std - - assert 0.0 <= ctx.trans_std <= 1.0 - if not data.is_cuda: - raise NotImplementedError - - n = rois.shape[0] - output = data.new_empty(n, out_channels, out_size, out_size) - output_count = data.new_empty(n, out_channels, out_size, out_size) - deform_pool_cuda.deform_psroi_pooling_cuda_forward( - data, rois, offset, output, output_count, ctx.no_trans, - ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size, - ctx.part_size, ctx.sample_per_part, ctx.trans_std) - - if data.requires_grad or rois.requires_grad or offset.requires_grad: - ctx.save_for_backward(data, rois, offset) - ctx.output_count = output_count - - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - if not grad_output.is_cuda: - raise NotImplementedError - - data, rois, offset = ctx.saved_tensors - output_count = ctx.output_count - grad_input = torch.zeros_like(data) - grad_rois = None - grad_offset = torch.zeros_like(offset) - - deform_pool_cuda.deform_psroi_pooling_cuda_backward( - grad_output, data, rois, offset, output_count, grad_input, - grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels, - ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part, - ctx.trans_std) - return (grad_input, grad_rois, grad_offset, None, None, None, None, - None, None, None, None) - - -deform_roi_pooling = DeformRoIPoolingFunction.apply - - -class DeformRoIPooling(nn.Module): - - def __init__(self, - spatial_scale, - out_size, - out_channels, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0): - super(DeformRoIPooling, self).__init__() - self.spatial_scale = spatial_scale - self.out_size = out_size - self.out_channels = out_channels - self.no_trans = no_trans - self.group_size = group_size - self.part_size = out_size if part_size is None else part_size - self.sample_per_part = sample_per_part - self.trans_std = trans_std - - def forward(self, data, rois, offset): - if self.no_trans: - offset = data.new_empty(0) - return deform_roi_pooling(data, rois, offset, self.spatial_scale, - self.out_size, self.out_channels, - self.no_trans, self.group_size, - self.part_size, self.sample_per_part, - self.trans_std) - - -class DeformRoIPoolingPack(DeformRoIPooling): - - def __init__(self, - spatial_scale, - out_size, - out_channels, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0, - num_offset_fcs=3, - deform_fc_channels=1024): - super(DeformRoIPoolingPack, - self).__init__(spatial_scale, out_size, out_channels, no_trans, - group_size, part_size, sample_per_part, trans_std) - - self.num_offset_fcs = num_offset_fcs - self.deform_fc_channels = deform_fc_channels - - if not no_trans: - seq = [] - ic = self.out_size * self.out_size * self.out_channels - for i in range(self.num_offset_fcs): - if i < self.num_offset_fcs - 1: - oc = self.deform_fc_channels - else: - oc = self.out_size * self.out_size * 2 - seq.append(nn.Linear(ic, oc)) - ic = oc - if i < self.num_offset_fcs - 1: - 
seq.append(nn.ReLU(inplace=True)) - self.offset_fc = nn.Sequential(*seq) - self.offset_fc[-1].weight.data.zero_() - self.offset_fc[-1].bias.data.zero_() - - def forward(self, data, rois): - assert data.size(1) == self.out_channels - if self.no_trans: - offset = data.new_empty(0) - return deform_roi_pooling(data, rois, offset, self.spatial_scale, - self.out_size, self.out_channels, - self.no_trans, self.group_size, - self.part_size, self.sample_per_part, - self.trans_std) - else: - n = rois.shape[0] - offset = data.new_empty(0) - x = deform_roi_pooling(data, rois, offset, self.spatial_scale, - self.out_size, self.out_channels, True, - self.group_size, self.part_size, - self.sample_per_part, self.trans_std) - offset = self.offset_fc(x.view(n, -1)) - offset = offset.view(n, 2, self.out_size, self.out_size) - return deform_roi_pooling(data, rois, offset, self.spatial_scale, - self.out_size, self.out_channels, - self.no_trans, self.group_size, - self.part_size, self.sample_per_part, - self.trans_std) - - -class ModulatedDeformRoIPoolingPack(DeformRoIPooling): - - def __init__(self, - spatial_scale, - out_size, - out_channels, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0, - num_offset_fcs=3, - num_mask_fcs=2, - deform_fc_channels=1024): - super(ModulatedDeformRoIPoolingPack, - self).__init__(spatial_scale, out_size, out_channels, no_trans, - group_size, part_size, sample_per_part, trans_std) - - self.num_offset_fcs = num_offset_fcs - self.num_mask_fcs = num_mask_fcs - self.deform_fc_channels = deform_fc_channels - - if not no_trans: - offset_fc_seq = [] - ic = self.out_size * self.out_size * self.out_channels - for i in range(self.num_offset_fcs): - if i < self.num_offset_fcs - 1: - oc = self.deform_fc_channels - else: - oc = self.out_size * self.out_size * 2 - offset_fc_seq.append(nn.Linear(ic, oc)) - ic = oc - if i < self.num_offset_fcs - 1: - offset_fc_seq.append(nn.ReLU(inplace=True)) - self.offset_fc = nn.Sequential(*offset_fc_seq) - self.offset_fc[-1].weight.data.zero_() - self.offset_fc[-1].bias.data.zero_() - - mask_fc_seq = [] - ic = self.out_size * self.out_size * self.out_channels - for i in range(self.num_mask_fcs): - if i < self.num_mask_fcs - 1: - oc = self.deform_fc_channels - else: - oc = self.out_size * self.out_size - mask_fc_seq.append(nn.Linear(ic, oc)) - ic = oc - if i < self.num_mask_fcs - 1: - mask_fc_seq.append(nn.ReLU(inplace=True)) - else: - mask_fc_seq.append(nn.Sigmoid()) - self.mask_fc = nn.Sequential(*mask_fc_seq) - self.mask_fc[-2].weight.data.zero_() - self.mask_fc[-2].bias.data.zero_() - - def forward(self, data, rois): - assert data.size(1) == self.out_channels - if self.no_trans: - offset = data.new_empty(0) - return deform_roi_pooling(data, rois, offset, self.spatial_scale, - self.out_size, self.out_channels, - self.no_trans, self.group_size, - self.part_size, self.sample_per_part, - self.trans_std) - else: - n = rois.shape[0] - offset = data.new_empty(0) - x = deform_roi_pooling(data, rois, offset, self.spatial_scale, - self.out_size, self.out_channels, True, - self.group_size, self.part_size, - self.sample_per_part, self.trans_std) - offset = self.offset_fc(x.view(n, -1)) - offset = offset.view(n, 2, self.out_size, self.out_size) - mask = self.mask_fc(x.view(n, -1)) - mask = mask.view(n, 1, self.out_size, self.out_size) - return deform_roi_pooling( - data, rois, offset, self.spatial_scale, self.out_size, - self.out_channels, self.no_trans, self.group_size, - self.part_size, self.sample_per_part, self.trans_std) * mask 
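
For reference, the Python wrappers deleted above (DeformConvPack, ModulatedDeformConvPack, and the deformable RoI-pooling packs) were consumed by the tracker roughly as follows. A minimal usage sketch, assuming the deform_conv_cuda extension has been built and a CUDA device is available; the import path and tensor shapes are illustrative, not taken from the repo:

    import torch
    from tracker.sot.lib.models.dcn import DeformConvPack, ModulatedDeformConvPack

    x = torch.randn(2, 64, 32, 32, device='cuda')

    # DeformConvPack predicts its own sampling offsets from the input via a
    # zero-initialised offset conv, so it starts out behaving like a plain Conv2d.
    dcn = DeformConvPack(64, 128, kernel_size=3, padding=1).cuda()
    y = dcn(x)    # -> (2, 128, 32, 32)

    # ModulatedDeformConvPack (DCNv2) additionally predicts a sigmoid mask
    # that reweights each sampled location.
    mdcn = ModulatedDeformConvPack(64, 128, kernel_size=3, padding=1).cuda()
    y2 = mdcn(x)  # -> (2, 128, 32, 32)
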
diff --git a/tracker/sot/lib/models/dcn/src/deform_conv_cuda.cpp b/tracker/sot/lib/models/dcn/src/deform_conv_cuda.cpp
deleted file mode 100644
index 5464c8c..0000000
--- a/tracker/sot/lib/models/dcn/src/deform_conv_cuda.cpp
+++ /dev/null
@@ -1,693 +0,0 @@
-
-#include <torch/extension.h>
-
-#include <cmath>
-#include <vector>
-
-void deformable_im2col(const at::Tensor data_im, const at::Tensor data_offset,
-                       const int channels, const int height, const int width,
-                       const int ksize_h, const int ksize_w, const int pad_h,
-                       const int pad_w, const int stride_h, const int stride_w,
-                       const int dilation_h, const int dilation_w,
-                       const int parallel_imgs, const int deformable_group,
-                       at::Tensor data_col);
-
-void deformable_col2im(const at::Tensor data_col, const at::Tensor data_offset,
-                       const int channels, const int height, const int width,
-                       const int ksize_h, const int ksize_w, const int pad_h,
-                       const int pad_w, const int stride_h, const int stride_w,
-                       const int dilation_h, const int dilation_w,
-                       const int parallel_imgs, const int deformable_group,
-                       at::Tensor grad_im);
-
-void deformable_col2im_coord(
-    const at::Tensor data_col, const at::Tensor data_im,
-    const at::Tensor data_offset, const int channels, const int height,
-    const int width, const int ksize_h, const int ksize_w, const int pad_h,
-    const int pad_w, const int stride_h, const int stride_w,
-    const int dilation_h, const int dilation_w, const int parallel_imgs,
-    const int deformable_group, at::Tensor grad_offset);
-
-void modulated_deformable_im2col_cuda(
-    const at::Tensor data_im, const at::Tensor data_offset,
-    const at::Tensor data_mask, const int batch_size, const int channels,
-    const int height_im, const int width_im, const int height_col,
-    const int width_col, const int kernel_h, const int kenerl_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    const int dilation_h, const int dilation_w, const int deformable_group,
-    at::Tensor data_col);
-
-void modulated_deformable_col2im_cuda(
-    const at::Tensor data_col, const at::Tensor data_offset,
-    const at::Tensor data_mask, const int batch_size, const int channels,
-    const int height_im, const int width_im, const int height_col,
-    const int width_col, const int kernel_h, const int kenerl_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    const int dilation_h, const int dilation_w, const int deformable_group,
-    at::Tensor grad_im);
-
-void modulated_deformable_col2im_coord_cuda(
-    const at::Tensor data_col, const at::Tensor data_im,
-    const at::Tensor data_offset, const at::Tensor data_mask,
-    const int batch_size, const int channels, const int height_im,
-    const int width_im, const int height_col, const int width_col,
-    const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w,
-    const int stride_h, const int stride_w, const int dilation_h,
-    const int dilation_w, const int deformable_group, at::Tensor grad_offset,
-    at::Tensor grad_mask);
-
-void shape_check(at::Tensor input, at::Tensor offset, at::Tensor *gradOutput,
-                 at::Tensor weight, int kH, int kW, int dH, int dW, int padH,
-                 int padW, int dilationH, int dilationW, int group,
-                 int deformable_group) {
-  AT_CHECK(weight.ndimension() == 4,
-           "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, "
-           "but got: %s",
-           weight.ndimension());
-
-  AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
-
-  AT_CHECK(kW > 0 && kH > 0,
-           "kernel size should be greater than zero, but got kH: %d kW: %d", kH,
-           kW);
-
-  AT_CHECK((weight.size(2) == kH && weight.size(3)
== kW), - "kernel size should be consistent with weight, ", - "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", kH, - kW, weight.size(2), weight.size(3)); - - AT_CHECK(dW > 0 && dH > 0, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - - AT_CHECK( - dilationW > 0 && dilationH > 0, - "dilation should be greater than 0, but got dilationH: %d dilationW: %d", - dilationH, dilationW); - - int ndim = input.ndimension(); - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - AT_CHECK(ndim == 3 || ndim == 4, "3D or 4D input tensor expected but got: %s", - ndim); - - long nInputPlane = weight.size(1) * group; - long inputHeight = input.size(dimh); - long inputWidth = input.size(dimw); - long nOutputPlane = weight.size(0); - long outputHeight = - (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; - long outputWidth = - (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; - - AT_CHECK(nInputPlane % deformable_group == 0, - "input channels must divide deformable group size"); - - if (outputWidth < 1 || outputHeight < 1) - AT_ERROR( - "Given input size: (%ld x %ld x %ld). " - "Calculated output size: (%ld x %ld x %ld). Output size is too small", - nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight, - outputWidth); - - AT_CHECK(input.size(1) == nInputPlane, - "invalid number of input planes, expected: %d, but got: %d", - nInputPlane, input.size(1)); - - AT_CHECK((inputHeight >= kH && inputWidth >= kW), - "input image is smaller than kernel"); - - AT_CHECK((offset.size(2) == outputHeight && offset.size(3) == outputWidth), - "invalid spatial size of offset, expected height: %d width: %d, but " - "got height: %d width: %d", - outputHeight, outputWidth, offset.size(2), offset.size(3)); - - AT_CHECK((offset.size(1) == deformable_group * 2 * kH * kW), - "invalid number of channels of offset"); - - if (gradOutput != NULL) { - AT_CHECK(gradOutput->size(dimf) == nOutputPlane, - "invalid number of gradOutput planes, expected: %d, but got: %d", - nOutputPlane, gradOutput->size(dimf)); - - AT_CHECK((gradOutput->size(dimh) == outputHeight && - gradOutput->size(dimw) == outputWidth), - "invalid size of gradOutput, expected height: %d width: %d , but " - "got height: %d width: %d", - outputHeight, outputWidth, gradOutput->size(dimh), - gradOutput->size(dimw)); - } -} - -int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight, - at::Tensor offset, at::Tensor output, - at::Tensor columns, at::Tensor ones, int kW, - int kH, int dW, int dH, int padW, int padH, - int dilationW, int dilationH, int group, - int deformable_group, int im2col_step) { - // todo: resize columns to include im2col: done - // todo: add im2col_step as input - // todo: add new output buffer and transpose it to output (or directly - // transpose output) todo: possibly change data indexing because of - // parallel_imgs - - shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH, padW, - dilationH, dilationW, group, deformable_group); - - input = input.contiguous(); - offset = offset.contiguous(); - weight = weight.contiguous(); - - int batch = 1; - if (input.ndimension() == 3) { - // Force batch - batch = 0; - input.unsqueeze_(0); - offset.unsqueeze_(0); - } - - // todo: assert batchsize dividable by im2col_step - - long batchSize = input.size(0); - long nInputPlane = input.size(1); - long inputHeight = input.size(2); - long inputWidth = input.size(3); - - long nOutputPlane = weight.size(0); - - long outputWidth = 
- (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = - (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - AT_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); - - output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane, - outputHeight, outputWidth}); - columns = at::zeros( - {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, - input.options()); - - if (ones.ndimension() != 2 || - ones.size(0) * ones.size(1) < outputHeight * outputWidth) { - ones = at::ones({outputHeight, outputWidth}, input.options()); - } - - input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, - inputHeight, inputWidth}); - offset = - offset.view({batchSize / im2col_step, im2col_step, - deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - at::Tensor output_buffer = - at::zeros({batchSize / im2col_step, nOutputPlane, - im2col_step * outputHeight, outputWidth}, - output.options()); - - output_buffer = output_buffer.view( - {output_buffer.size(0), group, output_buffer.size(1) / group, - output_buffer.size(2), output_buffer.size(3)}); - - for (int elt = 0; elt < batchSize / im2col_step; elt++) { - deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, - inputWidth, kH, kW, padH, padW, dH, dW, dilationH, - dilationW, im2col_step, deformable_group, columns); - - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - weight = weight.view({group, weight.size(0) / group, weight.size(1), - weight.size(2), weight.size(3)}); - - for (int g = 0; g < group; g++) { - output_buffer[elt][g] = output_buffer[elt][g] - .flatten(1) - .addmm_(weight[g].flatten(1), columns[g]) - .view_as(output_buffer[elt][g]); - } - } - - output_buffer = output_buffer.view( - {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2), - output_buffer.size(3), output_buffer.size(4)}); - - output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane, - im2col_step, outputHeight, outputWidth}); - output_buffer.transpose_(1, 2); - output.copy_(output_buffer); - output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); - - input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); - offset = offset.view( - {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - if (batch == 0) { - output = output.view({nOutputPlane, outputHeight, outputWidth}); - input = input.view({nInputPlane, inputHeight, inputWidth}); - offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); - } - - return 1; -} - -int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset, - at::Tensor gradOutput, at::Tensor gradInput, - at::Tensor gradOffset, at::Tensor weight, - at::Tensor columns, int kW, int kH, int dW, - int dH, int padW, int padH, int dilationW, - int dilationH, int group, - int deformable_group, int im2col_step) { - shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW, padH, padW, - dilationH, dilationW, group, deformable_group); - - input = input.contiguous(); - offset = offset.contiguous(); - gradOutput = gradOutput.contiguous(); - weight = weight.contiguous(); - - int batch = 1; - - if (input.ndimension() == 3) { - // Force batch - batch = 0; - input = input.view({1, input.size(0), input.size(1), input.size(2)}); - offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); - gradOutput = gradOutput.view( - {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); - } - - long 
batchSize = input.size(0); - long nInputPlane = input.size(1); - long inputHeight = input.size(2); - long inputWidth = input.size(3); - - long nOutputPlane = weight.size(0); - - long outputWidth = - (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = - (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - AT_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); - gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); - columns = at::zeros( - {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, - input.options()); - - // change order of grad output - gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, - nOutputPlane, outputHeight, outputWidth}); - gradOutput.transpose_(1, 2); - - gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane, - inputHeight, inputWidth}); - input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, - inputHeight, inputWidth}); - gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step, - deformable_group * 2 * kH * kW, outputHeight, - outputWidth}); - offset = - offset.view({batchSize / im2col_step, im2col_step, - deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - for (int elt = 0; elt < batchSize / im2col_step; elt++) { - // divide into groups - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - weight = weight.view({group, weight.size(0) / group, weight.size(1), - weight.size(2), weight.size(3)}); - gradOutput = gradOutput.view( - {gradOutput.size(0), group, gradOutput.size(1) / group, - gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)}); - - for (int g = 0; g < group; g++) { - columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), - gradOutput[elt][g].flatten(1), 0.0f, 1.0f); - } - - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - gradOutput = gradOutput.view( - {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2), - gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)}); - - deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane, - inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, - dilationH, dilationW, im2col_step, deformable_group, - gradOffset[elt]); - - deformable_col2im(columns, offset[elt], nInputPlane, inputHeight, - inputWidth, kH, kW, padH, padW, dH, dW, dilationH, - dilationW, im2col_step, deformable_group, gradInput[elt]); - } - - gradOutput.transpose_(1, 2); - gradOutput = - gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); - - gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); - input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); - gradOffset = gradOffset.view( - {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - offset = offset.view( - {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - if (batch == 0) { - gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); - input = input.view({nInputPlane, inputHeight, inputWidth}); - gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); - offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); - gradOffset = - gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); - } - - return 1; -} - -int deform_conv_backward_parameters_cuda( - at::Tensor input, at::Tensor offset, at::Tensor gradOutput, - at::Tensor gradWeight, // at::Tensor gradBias, - 
at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH, - int padW, int padH, int dilationW, int dilationH, int group, - int deformable_group, float scale, int im2col_step) { - // todo: transpose and reshape outGrad - // todo: reshape columns - // todo: add im2col_step as input - - shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH, dW, padH, - padW, dilationH, dilationW, group, deformable_group); - - input = input.contiguous(); - offset = offset.contiguous(); - gradOutput = gradOutput.contiguous(); - - int batch = 1; - - if (input.ndimension() == 3) { - // Force batch - batch = 0; - input = input.view( - at::IntList({1, input.size(0), input.size(1), input.size(2)})); - gradOutput = gradOutput.view( - {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); - } - - long batchSize = input.size(0); - long nInputPlane = input.size(1); - long inputHeight = input.size(2); - long inputWidth = input.size(3); - - long nOutputPlane = gradWeight.size(0); - - long outputWidth = - (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = - (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - AT_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); - - columns = at::zeros( - {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, - input.options()); - - gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, - nOutputPlane, outputHeight, outputWidth}); - gradOutput.transpose_(1, 2); - - at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); - gradOutputBuffer = - gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step, - outputHeight, outputWidth}); - gradOutputBuffer.copy_(gradOutput); - gradOutputBuffer = - gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, - im2col_step * outputHeight, outputWidth}); - - gradOutput.transpose_(1, 2); - gradOutput = - gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); - - input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, - inputHeight, inputWidth}); - offset = - offset.view({batchSize / im2col_step, im2col_step, - deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - for (int elt = 0; elt < batchSize / im2col_step; elt++) { - deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, - inputWidth, kH, kW, padH, padW, dH, dW, dilationH, - dilationW, im2col_step, deformable_group, columns); - - // divide into group - gradOutputBuffer = gradOutputBuffer.view( - {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group, - gradOutputBuffer.size(2), gradOutputBuffer.size(3)}); - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - gradWeight = - gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1), - gradWeight.size(2), gradWeight.size(3)}); - - for (int g = 0; g < group; g++) { - gradWeight[g] = gradWeight[g] - .flatten(1) - .addmm_(gradOutputBuffer[elt][g].flatten(1), - columns[g].transpose(1, 0), 1.0, scale) - .view_as(gradWeight[g]); - } - gradOutputBuffer = gradOutputBuffer.view( - {gradOutputBuffer.size(0), - gradOutputBuffer.size(1) * gradOutputBuffer.size(2), - gradOutputBuffer.size(3), gradOutputBuffer.size(4)}); - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1), - gradWeight.size(2), gradWeight.size(3), - gradWeight.size(4)}); - } - - input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); - 
offset = offset.view( - {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); - - if (batch == 0) { - gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); - input = input.view({nInputPlane, inputHeight, inputWidth}); - } - - return 1; -} - -void modulated_deform_conv_cuda_forward( - at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, - at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, - int kernel_h, int kernel_w, const int stride_h, const int stride_w, - const int pad_h, const int pad_w, const int dilation_h, - const int dilation_w, const int group, const int deformable_group, - const bool with_bias) { - AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); - AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); - - const int batch = input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - - const int channels_out = weight.size(0); - const int channels_kernel = weight.size(1); - const int kernel_h_ = weight.size(2); - const int kernel_w_ = weight.size(3); - - if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) - AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", - kernel_h_, kernel_w, kernel_h_, kernel_w_); - if (channels != channels_kernel * group) - AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", - channels, channels_kernel * group); - - const int height_out = - (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; - const int width_out = - (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; - - if (ones.ndimension() != 2 || - ones.size(0) * ones.size(1) < height_out * width_out) { - // Resize plane and fill with ones... 
- ones = at::ones({height_out, width_out}, input.options()); - } - - // resize output - output = output.view({batch, channels_out, height_out, width_out}).zero_(); - // resize temporary columns - columns = - at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out}, - input.options()); - - output = output.view({output.size(0), group, output.size(1) / group, - output.size(2), output.size(3)}); - - for (int b = 0; b < batch; b++) { - modulated_deformable_im2col_cuda( - input[b], offset[b], mask[b], 1, channels, height, width, height_out, - width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, columns); - - // divide into group - weight = weight.view({group, weight.size(0) / group, weight.size(1), - weight.size(2), weight.size(3)}); - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - - for (int g = 0; g < group; g++) { - output[b][g] = output[b][g] - .flatten(1) - .addmm_(weight[g].flatten(1), columns[g]) - .view_as(output[b][g]); - } - - weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), - weight.size(3), weight.size(4)}); - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - } - - output = output.view({output.size(0), output.size(1) * output.size(2), - output.size(3), output.size(4)}); - - if (with_bias) { - output += bias.view({1, bias.size(0), 1, 1}); - } -} - -void modulated_deform_conv_cuda_backward( - at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, - at::Tensor offset, at::Tensor mask, at::Tensor columns, - at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, - at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, - int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, - int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, - const bool with_bias) { - AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); - AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); - - const int batch = input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - - const int channels_kernel = weight.size(1); - const int kernel_h_ = weight.size(2); - const int kernel_w_ = weight.size(3); - if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) - AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", - kernel_h_, kernel_w, kernel_h_, kernel_w_); - if (channels != channels_kernel * group) - AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", - channels, channels_kernel * group); - - const int height_out = - (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; - const int width_out = - (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; - - if (ones.ndimension() != 2 || - ones.size(0) * ones.size(1) < height_out * width_out) { - // Resize plane and fill with ones... 
- ones = at::ones({height_out, width_out}, input.options()); - } - - grad_input = grad_input.view({batch, channels, height, width}); - columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out}, - input.options()); - - grad_output = - grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, - grad_output.size(2), grad_output.size(3)}); - - for (int b = 0; b < batch; b++) { - // divide int group - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - weight = weight.view({group, weight.size(0) / group, weight.size(1), - weight.size(2), weight.size(3)}); - - for (int g = 0; g < group; g++) { - columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), - grad_output[b][g].flatten(1), 0.0f, 1.0f); - } - - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), - weight.size(3), weight.size(4)}); - - // gradient w.r.t. input coordinate data - modulated_deformable_col2im_coord_cuda( - columns, input[b], offset[b], mask[b], 1, channels, height, width, - height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, - stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b], - grad_mask[b]); - // gradient w.r.t. input data - modulated_deformable_col2im_cuda( - columns, offset[b], mask[b], 1, channels, height, width, height_out, - width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, grad_input[b]); - - // gradient w.r.t. weight, dWeight should accumulate across the batch and - // group - modulated_deformable_im2col_cuda( - input[b], offset[b], mask[b], 1, channels, height, width, height_out, - width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, columns); - - columns = columns.view({group, columns.size(0) / group, columns.size(1)}); - grad_weight = grad_weight.view({group, grad_weight.size(0) / group, - grad_weight.size(1), grad_weight.size(2), - grad_weight.size(3)}); - if (with_bias) - grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); - - for (int g = 0; g < group; g++) { - grad_weight[g] = - grad_weight[g] - .flatten(1) - .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) - .view_as(grad_weight[g]); - if (with_bias) { - grad_bias[g] = - grad_bias[g] - .view({-1, 1}) - .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) - .view(-1); - } - } - - columns = - columns.view({columns.size(0) * columns.size(1), columns.size(2)}); - grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), - grad_weight.size(2), grad_weight.size(3), - grad_weight.size(4)}); - if (with_bias) - grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); - } - grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), - grad_output.size(2), grad_output.size(3), - grad_output.size(4)}); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("deform_conv_forward_cuda", &deform_conv_forward_cuda, - "deform forward (CUDA)"); - m.def("deform_conv_backward_input_cuda", &deform_conv_backward_input_cuda, - "deform_conv_backward_input (CUDA)"); - m.def("deform_conv_backward_parameters_cuda", - &deform_conv_backward_parameters_cuda, - "deform_conv_backward_parameters (CUDA)"); - m.def("modulated_deform_conv_cuda_forward", - &modulated_deform_conv_cuda_forward, - "modulated deform conv forward (CUDA)"); - m.def("modulated_deform_conv_cuda_backward", - 
&modulated_deform_conv_cuda_backward, - "modulated deform conv backward (CUDA)"); -} diff --git a/tracker/sot/lib/models/dcn/src/deform_conv_cuda_kernel.cu b/tracker/sot/lib/models/dcn/src/deform_conv_cuda_kernel.cu deleted file mode 100644 index 17a6e42..0000000 --- a/tracker/sot/lib/models/dcn/src/deform_conv_cuda_kernel.cu +++ /dev/null @@ -1,865 +0,0 @@ -/*! - ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** - * - * COPYRIGHT - * - * All contributions by the University of California: - * Copyright (c) 2014-2017 The Regents of the University of California (Regents) - * All rights reserved. - * - * All other contributions: - * Copyright (c) 2014-2017, the respective contributors - * All rights reserved. - * - * Caffe uses a shared copyright model: each contributor holds copyright over - * their contributions to Caffe. The project versioning records all such - * contribution and copyright details. If a contributor wants to further mark - * their specific copyright on a particular contribution, they should indicate - * their copyright solely in the commit message of the change when it is - * committed. - * - * LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * CONTRIBUTION AGREEMENT - * - * By contributing to the BVLC/caffe repository through pull-request, comment, - * or otherwise, the contributor releases their content to the - * license and copyright terms herein. - * - ***************** END Caffe Copyright Notice and Disclaimer ******************** - * - * Copyright (c) 2018 Microsoft - * Licensed under The MIT License [see LICENSE for details] - * \file modulated_deformable_im2col.cuh - * \brief Function definitions of converting an image to - * column matrix based on kernel, padding, dilation, and offset. - * These functions are mainly used in deformable convolution operators. 
- * \ref: https://arxiv.org/abs/1703.06211 - * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng - */ - - -#include -#include -#include -#include -#include - -using namespace at; - -#define CUDA_KERNEL_LOOP(i, n) \ - for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ - i += blockDim.x * gridDim.x) - -const int CUDA_NUM_THREADS = 1024; -const int kMaxGridNum = 65535; - -inline int GET_BLOCKS(const int N) -{ - return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); -} - -template -__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, - const int height, const int width, scalar_t h, scalar_t w) -{ - - int h_low = floor(h); - int w_low = floor(w); - int h_high = h_low + 1; - int w_high = w_low + 1; - - scalar_t lh = h - h_low; - scalar_t lw = w - w_low; - scalar_t hh = 1 - lh, hw = 1 - lw; - - scalar_t v1 = 0; - if (h_low >= 0 && w_low >= 0) - v1 = bottom_data[h_low * data_width + w_low]; - scalar_t v2 = 0; - if (h_low >= 0 && w_high <= width - 1) - v2 = bottom_data[h_low * data_width + w_high]; - scalar_t v3 = 0; - if (h_high <= height - 1 && w_low >= 0) - v3 = bottom_data[h_high * data_width + w_low]; - scalar_t v4 = 0; - if (h_high <= height - 1 && w_high <= width - 1) - v4 = bottom_data[h_high * data_width + w_high]; - - scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; - - scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); - return val; -} - -template -__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, - const int h, const int w, const int height, const int width) -{ - - if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) - { - //empty - return 0; - } - - int argmax_h_low = floor(argmax_h); - int argmax_w_low = floor(argmax_w); - int argmax_h_high = argmax_h_low + 1; - int argmax_w_high = argmax_w_low + 1; - - scalar_t weight = 0; - if (h == argmax_h_low && w == argmax_w_low) - weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); - if (h == argmax_h_low && w == argmax_w_high) - weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); - if (h == argmax_h_high && w == argmax_w_low) - weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); - if (h == argmax_h_high && w == argmax_w_high) - weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); - return weight; -} - -template -__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, - const int height, const int width, const scalar_t *im_data, - const int data_width, const int bp_dir) -{ - - if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) - { - //empty - return 0; - } - - int argmax_h_low = floor(argmax_h); - int argmax_w_low = floor(argmax_w); - int argmax_h_high = argmax_h_low + 1; - int argmax_w_high = argmax_w_low + 1; - - scalar_t weight = 0; - - if (bp_dir == 0) - { - if (argmax_h_low >= 0 && argmax_w_low >= 0) - weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; - if (argmax_h_low >= 0 && argmax_w_high <= width - 1) - weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; - if (argmax_h_high <= height - 1 && argmax_w_low >= 0) - weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; - if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) - weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; - } - else if (bp_dir == 1) - { - if (argmax_h_low >= 0 && 
argmax_w_low >= 0) - weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; - if (argmax_h_low >= 0 && argmax_w_high <= width - 1) - weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; - if (argmax_h_high <= height - 1 && argmax_w_low >= 0) - weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; - if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) - weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; - } - - return weight; -} - -template -__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, const int channel_per_deformable_group, - const int batch_size, const int num_channels, const int deformable_group, - const int height_col, const int width_col, - scalar_t *data_col) -{ - CUDA_KERNEL_LOOP(index, n) - { - // index index of output matrix - const int w_col = index % width_col; - const int h_col = (index / width_col) % height_col; - const int b_col = (index / width_col / height_col) % batch_size; - const int c_im = (index / width_col / height_col) / batch_size; - const int c_col = c_im * kernel_h * kernel_w; - - // compute deformable group index - const int deformable_group_index = c_im / channel_per_deformable_group; - - const int h_in = h_col * stride_h - pad_h; - const int w_in = w_col * stride_w - pad_w; - scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; - //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; - const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; - const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; - - for (int i = 0; i < kernel_h; ++i) - { - for (int j = 0; j < kernel_w; ++j) - { - const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; - const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; - const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; - const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; - scalar_t val = static_cast(0); - const scalar_t h_im = h_in + i * dilation_h + offset_h; - const scalar_t w_im = w_in + j * dilation_w + offset_w; - if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) - { - //const scalar_t map_h = i * dilation_h + offset_h; - //const scalar_t map_w = j * dilation_w + offset_w; - //const int cur_height = height - h_in; - //const int cur_width = width - w_in; - //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); - val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); - } - *data_col_ptr = val; - data_col_ptr += batch_size * height_col * width_col; - } - } - } -} - -void deformable_im2col( - const at::Tensor data_im, const at::Tensor data_offset, const int channels, - const int height, const int width, const int ksize_h, const int ksize_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, const int parallel_imgs, - 
const int deformable_group, at::Tensor data_col) -{ - // num_axes should be smaller than block size - // todo: check parallel_imgs is correctly passed in - int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; - int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; - int num_kernels = channels * height_col * width_col * parallel_imgs; - int channel_per_deformable_group = channels / deformable_group; - - AT_DISPATCH_FLOATING_TYPES_AND_HALF( - data_im.scalar_type(), "deformable_im2col_gpu", ([&] { - const scalar_t *data_im_ = data_im.data(); - const scalar_t *data_offset_ = data_offset.data(); - scalar_t *data_col_ = data_col.data(); - - deformable_im2col_gpu_kernel<<>>( - num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, - pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, - channel_per_deformable_group, parallel_imgs, channels, deformable_group, - height_col, width_col, data_col_); - })); - - cudaError_t err = cudaGetLastError(); - if (err != cudaSuccess) - { - printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); - } -} - -template -__global__ void deformable_col2im_gpu_kernel( - const int n, const scalar_t *data_col, const scalar_t *data_offset, - const int channels, const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int channel_per_deformable_group, - const int batch_size, const int deformable_group, - const int height_col, const int width_col, - scalar_t *grad_im) -{ - CUDA_KERNEL_LOOP(index, n) - { - const int j = (index / width_col / height_col / batch_size) % kernel_w; - const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; - const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; - // compute the start and end of the output - - const int deformable_group_index = c / channel_per_deformable_group; - - int w_out = index % width_col; - int h_out = (index / width_col) % height_col; - int b = (index / width_col / height_col) % batch_size; - int w_in = w_out * stride_w - pad_w; - int h_in = h_out * stride_h - pad_h; - - const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * - 2 * kernel_h * kernel_w * height_col * width_col; - const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; - const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; - const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; - const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; - const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; - const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; - - const scalar_t cur_top_grad = data_col[index]; - const int cur_h = (int)cur_inv_h_data; - const int cur_w = (int)cur_inv_w_data; - for (int dy = -2; dy <= 2; dy++) - { - for (int dx = -2; dx <= 2; dx++) - { - if (cur_h + dy >= 0 && cur_h + dy < height && - cur_w + dx >= 0 && cur_w + dx < width && - abs(cur_inv_h_data - (cur_h + dy)) < 1 && - abs(cur_inv_w_data - (cur_w + dx)) < 1) - { - int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; - scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); - atomicAdd(grad_im + cur_bottom_grad_pos, weight * 
cur_top_grad); - } - } - } - } -} - -void deformable_col2im( - const at::Tensor data_col, const at::Tensor data_offset, const int channels, - const int height, const int width, const int ksize_h, - const int ksize_w, const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int parallel_imgs, const int deformable_group, - at::Tensor grad_im) -{ - - // todo: make sure parallel_imgs is passed in correctly - int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; - int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; - int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; - int channel_per_deformable_group = channels / deformable_group; - - AT_DISPATCH_FLOATING_TYPES_AND_HALF( - data_col.scalar_type(), "deformable_col2im_gpu", ([&] { - const scalar_t *data_col_ = data_col.data(); - const scalar_t *data_offset_ = data_offset.data(); - scalar_t *grad_im_ = grad_im.data(); - - deformable_col2im_gpu_kernel<<>>( - num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, - ksize_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, channel_per_deformable_group, - parallel_imgs, deformable_group, height_col, width_col, grad_im_); - })); - - cudaError_t err = cudaGetLastError(); - if (err != cudaSuccess) - { - printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); - } -} - -template -__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, - const scalar_t *data_im, const scalar_t *data_offset, - const int channels, const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int channel_per_deformable_group, - const int batch_size, const int offset_channels, const int deformable_group, - const int height_col, const int width_col, scalar_t *grad_offset) -{ - CUDA_KERNEL_LOOP(index, n) - { - scalar_t val = 0; - int w = index % width_col; - int h = (index / width_col) % height_col; - int c = (index / width_col / height_col) % offset_channels; - int b = (index / width_col / height_col) / offset_channels; - // compute the start and end of the output - - const int deformable_group_index = c / (2 * kernel_h * kernel_w); - const int col_step = kernel_h * kernel_w; - int cnt = 0; - const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * - batch_size * width_col * height_col; - const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * - channel_per_deformable_group / kernel_h / kernel_w * height * width; - const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * - kernel_h * kernel_w * height_col * width_col; - - const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; - - for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) - { - const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; - const int bp_dir = offset_c % 2; - - int j = (col_pos / width_col / height_col / batch_size) % kernel_w; - int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; - int w_out = col_pos % width_col; - int h_out = (col_pos / width_col) % height_col; - int w_in = w_out * stride_w - pad_w; - int h_in = h_out * stride_h - 
pad_h;
-      const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
-      const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
-      const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
-      const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
-      scalar_t inv_h = h_in + i * dilation_h + offset_h;
-      scalar_t inv_w = w_in + j * dilation_w + offset_w;
-      if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
-      {
-        inv_h = inv_w = -2;
-      }
-      const scalar_t weight = get_coordinate_weight(
-          inv_h, inv_w,
-          height, width, data_im_ptr + cnt * height * width, width, bp_dir);
-      val += weight * data_col_ptr[col_pos];
-      cnt += 1;
-    }
-
-    grad_offset[index] = val;
-  }
-}
-
-void deformable_col2im_coord(
-    const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
-    const int channels, const int height, const int width, const int ksize_h,
-    const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
-    const int stride_w, const int dilation_h, const int dilation_w,
-    const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
-{
-
-  int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
-  int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
-  int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
-  int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
-
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
-        const scalar_t *data_col_ = data_col.data<scalar_t>();
-        const scalar_t *data_im_ = data_im.data<scalar_t>();
-        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
-        scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
-
-        deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
-            num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
-            ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
-            dilation_h, dilation_w, channel_per_deformable_group,
-            parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
-            height_col, width_col, grad_offset_);
-      }));
-}
-
-template <typename scalar_t>
-__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
-                                         const int height, const int width, scalar_t h, scalar_t w)
-{
-  int h_low = floor(h);
-  int w_low = floor(w);
-  int h_high = h_low + 1;
-  int w_high = w_low + 1;
-
-  scalar_t lh = h - h_low;
-  scalar_t lw = w - w_low;
-  scalar_t hh = 1 - lh, hw = 1 - lw;
-
-  scalar_t v1 = 0;
-  if (h_low >= 0 && w_low >= 0)
-    v1 = bottom_data[h_low * data_width + w_low];
-  scalar_t v2 = 0;
-  if (h_low >= 0 && w_high <= width - 1)
-    v2 = bottom_data[h_low * data_width + w_high];
-  scalar_t v3 = 0;
-  if (h_high <= height - 1 && w_low >= 0)
-    v3 = bottom_data[h_high * data_width + w_low];
-  scalar_t v4 = 0;
-  if (h_high <= height - 1 && w_high <= width - 1)
-    v4 = bottom_data[h_high * data_width + w_high];
-
-  scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
-
-  scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
-  return val;
-}
-
-template <typename scalar_t>
-__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
-                                             const int h, const int w, const int height, const int width)
-{
-  if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
-  {
-    //empty
-    return 0;
-  }
-
-  int argmax_h_low = floor(argmax_h);
-  int argmax_w_low = floor(argmax_w);
-  int argmax_h_high =
argmax_h_low + 1; - int argmax_w_high = argmax_w_low + 1; - - scalar_t weight = 0; - if (h == argmax_h_low && w == argmax_w_low) - weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); - if (h == argmax_h_low && w == argmax_w_high) - weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); - if (h == argmax_h_high && w == argmax_w_low) - weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); - if (h == argmax_h_high && w == argmax_w_high) - weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); - return weight; -} - -template -__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, - const int height, const int width, const scalar_t *im_data, - const int data_width, const int bp_dir) -{ - if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) - { - //empty - return 0; - } - - int argmax_h_low = floor(argmax_h); - int argmax_w_low = floor(argmax_w); - int argmax_h_high = argmax_h_low + 1; - int argmax_w_high = argmax_w_low + 1; - - scalar_t weight = 0; - - if (bp_dir == 0) - { - if (argmax_h_low >= 0 && argmax_w_low >= 0) - weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; - if (argmax_h_low >= 0 && argmax_w_high <= width - 1) - weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; - if (argmax_h_high <= height - 1 && argmax_w_low >= 0) - weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; - if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) - weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; - } - else if (bp_dir == 1) - { - if (argmax_h_low >= 0 && argmax_w_low >= 0) - weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; - if (argmax_h_low >= 0 && argmax_w_high <= width - 1) - weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; - if (argmax_h_high <= height - 1 && argmax_w_low >= 0) - weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; - if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) - weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; - } - - return weight; -} - -template -__global__ void modulated_deformable_im2col_gpu_kernel(const int n, - const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int channel_per_deformable_group, - const int batch_size, const int num_channels, const int deformable_group, - const int height_col, const int width_col, - scalar_t *data_col) -{ - CUDA_KERNEL_LOOP(index, n) - { - // index index of output matrix - const int w_col = index % width_col; - const int h_col = (index / width_col) % height_col; - const int b_col = (index / width_col / height_col) % batch_size; - const int c_im = (index / width_col / height_col) / batch_size; - const int c_col = c_im * kernel_h * kernel_w; - - // compute deformable group index - const int deformable_group_index = c_im / channel_per_deformable_group; - - const int h_in = h_col * stride_h - pad_h; - const int w_in = w_col * stride_w - pad_w; - - scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; - //const float* data_im_ptr = data_im + 
((b_col * num_channels + c_im) * height + h_in) * width + w_in; - const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; - const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; - - const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; - - for (int i = 0; i < kernel_h; ++i) - { - for (int j = 0; j < kernel_w; ++j) - { - const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; - const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; - const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; - const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; - const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; - const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; - scalar_t val = static_cast(0); - const scalar_t h_im = h_in + i * dilation_h + offset_h; - const scalar_t w_im = w_in + j * dilation_w + offset_w; - //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { - if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) - { - //const float map_h = i * dilation_h + offset_h; - //const float map_w = j * dilation_w + offset_w; - //const int cur_height = height - h_in; - //const int cur_width = width - w_in; - //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); - val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); - } - *data_col_ptr = val * mask; - data_col_ptr += batch_size * height_col * width_col; - //data_col_ptr += height_col * width_col; - } - } - } -} - -template -__global__ void modulated_deformable_col2im_gpu_kernel(const int n, - const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, - const int channels, const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int channel_per_deformable_group, - const int batch_size, const int deformable_group, - const int height_col, const int width_col, - scalar_t *grad_im) -{ - CUDA_KERNEL_LOOP(index, n) - { - const int j = (index / width_col / height_col / batch_size) % kernel_w; - const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; - const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; - // compute the start and end of the output - - const int deformable_group_index = c / channel_per_deformable_group; - - int w_out = index % width_col; - int h_out = (index / width_col) % height_col; - int b = (index / width_col / height_col) % batch_size; - int w_in = w_out * stride_w - pad_w; - int h_in = h_out * stride_h - pad_h; - - const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; - const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; - const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; - const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; - const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + 
w_out; - const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; - const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; - const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; - const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; - const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; - - const scalar_t cur_top_grad = data_col[index] * mask; - const int cur_h = (int)cur_inv_h_data; - const int cur_w = (int)cur_inv_w_data; - for (int dy = -2; dy <= 2; dy++) - { - for (int dx = -2; dx <= 2; dx++) - { - if (cur_h + dy >= 0 && cur_h + dy < height && - cur_w + dx >= 0 && cur_w + dx < width && - abs(cur_inv_h_data - (cur_h + dy)) < 1 && - abs(cur_inv_w_data - (cur_w + dx)) < 1) - { - int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; - scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); - atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); - } - } - } - } -} - -template -__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, - const scalar_t *data_col, const scalar_t *data_im, - const scalar_t *data_offset, const scalar_t *data_mask, - const int channels, const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int channel_per_deformable_group, - const int batch_size, const int offset_channels, const int deformable_group, - const int height_col, const int width_col, - scalar_t *grad_offset, scalar_t *grad_mask) -{ - CUDA_KERNEL_LOOP(index, n) - { - scalar_t val = 0, mval = 0; - int w = index % width_col; - int h = (index / width_col) % height_col; - int c = (index / width_col / height_col) % offset_channels; - int b = (index / width_col / height_col) / offset_channels; - // compute the start and end of the output - - const int deformable_group_index = c / (2 * kernel_h * kernel_w); - const int col_step = kernel_h * kernel_w; - int cnt = 0; - const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; - const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; - const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; - const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; - - const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; - - for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) - { - const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; - const int bp_dir = offset_c % 2; - - int j = (col_pos / width_col / height_col / batch_size) % kernel_w; - int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; - int w_out = col_pos % width_col; - int h_out = (col_pos / width_col) % height_col; - int w_in = w_out * stride_w - pad_w; - int h_in = h_out * stride_h - pad_h; - const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); - const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); - const int data_mask_hw_ptr = (((i * kernel_w + 
j) * height_col + h_out) * width_col + w_out);
-      const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
-      const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
-      const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
-      scalar_t inv_h = h_in + i * dilation_h + offset_h;
-      scalar_t inv_w = w_in + j * dilation_w + offset_w;
-      if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
-      {
-        inv_h = inv_w = -2;
-      }
-      else
-      {
-        mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
-      }
-      const scalar_t weight = dmcn_get_coordinate_weight(
-          inv_h, inv_w,
-          height, width, data_im_ptr + cnt * height * width, width, bp_dir);
-      val += weight * data_col_ptr[col_pos] * mask;
-      cnt += 1;
-    }
-    // KERNEL_ASSIGN(grad_offset[index], offset_req, val);
-    grad_offset[index] = val;
-    if (offset_c % 2 == 0)
-      // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
-      grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
-  }
-}
-
-void modulated_deformable_im2col_cuda(
-    const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
-    const int batch_size, const int channels, const int height_im, const int width_im,
-    const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    const int dilation_h, const int dilation_w,
-    const int deformable_group, at::Tensor data_col)
-{
-  // num_axes should be smaller than block size
-  const int channel_per_deformable_group = channels / deformable_group;
-  const int num_kernels = channels * batch_size * height_col * width_col;
-
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
-        const scalar_t *data_im_ = data_im.data<scalar_t>();
-        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
-        const scalar_t *data_mask_ = data_mask.data<scalar_t>();
-        scalar_t *data_col_ = data_col.data<scalar_t>();
-
-        modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
-            num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
-            pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
-            batch_size, channels, deformable_group, height_col, width_col, data_col_);
-      }));
-
-  cudaError_t err = cudaGetLastError();
-  if (err != cudaSuccess)
-  {
-    printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
-  }
-}
-
-void modulated_deformable_col2im_cuda(
-    const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
-    const int batch_size, const int channels, const int height_im, const int width_im,
-    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    const int dilation_h, const int dilation_w,
-    const int deformable_group, at::Tensor grad_im)
-{
-
-  const int channel_per_deformable_group = channels / deformable_group;
-  const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
-
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
-        const scalar_t *data_col_ = data_col.data<scalar_t>();
-        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
-        const scalar_t *data_mask_ =
data_mask.data<scalar_t>();
-        scalar_t *grad_im_ = grad_im.data<scalar_t>();
-
-        modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
-            num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
-            kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w,
-            dilation_h, dilation_w, channel_per_deformable_group,
-            batch_size, deformable_group, height_col, width_col, grad_im_);
-      }));
-
-  cudaError_t err = cudaGetLastError();
-  if (err != cudaSuccess)
-  {
-    printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
-  }
-}
-
-void modulated_deformable_col2im_coord_cuda(
-    const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
-    const int batch_size, const int channels, const int height_im, const int width_im,
-    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    const int dilation_h, const int dilation_w,
-    const int deformable_group,
-    at::Tensor grad_offset, at::Tensor grad_mask)
-{
-  const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
-  const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
-
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
-        const scalar_t *data_col_ = data_col.data<scalar_t>();
-        const scalar_t *data_im_ = data_im.data<scalar_t>();
-        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
-        const scalar_t *data_mask_ = data_mask.data<scalar_t>();
-        scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
-        scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
-
-        modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
-            num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
-            kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
-            dilation_h, dilation_w, channel_per_deformable_group,
-            batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
-            grad_offset_, grad_mask_);
-      }));
-  cudaError_t err = cudaGetLastError();
-  if (err != cudaSuccess)
-  {
-    printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
-  }
-}
diff --git a/tracker/sot/lib/models/dcn/src/deform_pool_cuda.cpp b/tracker/sot/lib/models/dcn/src/deform_pool_cuda.cpp
deleted file mode 100644
index b2efdf9..0000000
--- a/tracker/sot/lib/models/dcn/src/deform_pool_cuda.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-
-#include <torch/extension.h>
-
-#include <cmath>
-#include <vector>
-
-void DeformablePSROIPoolForward(
-    const at::Tensor data, const at::Tensor bbox, const at::Tensor trans,
-    at::Tensor out, at::Tensor top_count, const int batch, const int channels,
-    const int height, const int width, const int num_bbox,
-    const int channels_trans, const int no_trans, const float spatial_scale,
-    const int output_dim, const int group_size, const int pooled_size,
-    const int part_size, const int sample_per_part, const float trans_std);
-
-void DeformablePSROIPoolBackwardAcc(
-    const at::Tensor out_grad, const at::Tensor data, const at::Tensor bbox,
-    const at::Tensor trans, const at::Tensor top_count, at::Tensor in_grad,
-    at::Tensor trans_grad, const int batch, const int channels,
-    const int height, const int width, const int num_bbox,
-    const int channels_trans, const int no_trans, const float spatial_scale,
-    const int output_dim, const int group_size, const int pooled_size,
-    const int part_size, const int sample_per_part, const float trans_std);
-
-void deform_psroi_pooling_cuda_forward(
- at::Tensor input, at::Tensor bbox, at::Tensor trans, at::Tensor out, - at::Tensor top_count, const int no_trans, const float spatial_scale, - const int output_dim, const int group_size, const int pooled_size, - const int part_size, const int sample_per_part, const float trans_std) { - AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); - - const int batch = input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - const int channels_trans = no_trans ? 2 : trans.size(1); - - const int num_bbox = bbox.size(0); - if (num_bbox != out.size(0)) - AT_ERROR("Output shape and bbox number wont match: (%d vs %d).", - out.size(0), num_bbox); - - DeformablePSROIPoolForward( - input, bbox, trans, out, top_count, batch, channels, height, width, - num_bbox, channels_trans, no_trans, spatial_scale, output_dim, group_size, - pooled_size, part_size, sample_per_part, trans_std); -} - -void deform_psroi_pooling_cuda_backward( - at::Tensor out_grad, at::Tensor input, at::Tensor bbox, at::Tensor trans, - at::Tensor top_count, at::Tensor input_grad, at::Tensor trans_grad, - const int no_trans, const float spatial_scale, const int output_dim, - const int group_size, const int pooled_size, const int part_size, - const int sample_per_part, const float trans_std) { - AT_CHECK(out_grad.is_contiguous(), "out_grad tensor has to be contiguous"); - AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); - - const int batch = input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - const int channels_trans = no_trans ? 2 : trans.size(1); - - const int num_bbox = bbox.size(0); - if (num_bbox != out_grad.size(0)) - AT_ERROR("Output shape and bbox number wont match: (%d vs %d).", - out_grad.size(0), num_bbox); - - DeformablePSROIPoolBackwardAcc( - out_grad, input, bbox, trans, top_count, input_grad, trans_grad, batch, - channels, height, width, num_bbox, channels_trans, no_trans, - spatial_scale, output_dim, group_size, pooled_size, part_size, - sample_per_part, trans_std); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("deform_psroi_pooling_cuda_forward", &deform_psroi_pooling_cuda_forward, - "deform psroi pooling forward(CUDA)"); - m.def("deform_psroi_pooling_cuda_backward", - &deform_psroi_pooling_cuda_backward, - "deform psroi pooling backward(CUDA)"); -} \ No newline at end of file diff --git a/tracker/sot/lib/models/dcn/src/deform_pool_cuda_kernel.cu b/tracker/sot/lib/models/dcn/src/deform_pool_cuda_kernel.cu deleted file mode 100644 index cb7c835..0000000 --- a/tracker/sot/lib/models/dcn/src/deform_pool_cuda_kernel.cu +++ /dev/null @@ -1,363 +0,0 @@ -/*! 
- * Copyright (c) 2017 Microsoft
- * Licensed under The MIT License [see LICENSE for details]
- * \file deformable_psroi_pooling.cu
- * \brief
- * \author Yi Li, Guodong Zhang, Jifeng Dai
-*/
-/***************** Adapted by Charles Shang *********************/
-
-#include <ATen/ATen.h>
-#include <THC/THCAtomics.cuh>
-#include <stdio.h>
-#include <math.h>
-#include <algorithm>
-
-using namespace at;
-
-#define CUDA_KERNEL_LOOP(i, n)                        \
-  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
-       i < (n);                                       \
-       i += blockDim.x * gridDim.x)
-
-const int CUDA_NUM_THREADS = 1024;
-inline int GET_BLOCKS(const int N)
-{
-  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
-}
-
-template <typename scalar_t>
-__device__ scalar_t bilinear_interp(
-    const scalar_t *data,
-    const scalar_t x,
-    const scalar_t y,
-    const int width,
-    const int height)
-{
-  int x1 = floor(x);
-  int x2 = ceil(x);
-  int y1 = floor(y);
-  int y2 = ceil(y);
-  scalar_t dist_x = (scalar_t)(x - x1);
-  scalar_t dist_y = (scalar_t)(y - y1);
-  scalar_t value11 = data[y1 * width + x1];
-  scalar_t value12 = data[y2 * width + x1];
-  scalar_t value21 = data[y1 * width + x2];
-  scalar_t value22 = data[y2 * width + x2];
-  scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22;
-  return value;
-}
-
-template <typename scalar_t>
-__global__ void DeformablePSROIPoolForwardKernel(
-    const int count,
-    const scalar_t *bottom_data,
-    const scalar_t spatial_scale,
-    const int channels,
-    const int height, const int width,
-    const int pooled_height, const int pooled_width,
-    const scalar_t *bottom_rois, const scalar_t *bottom_trans,
-    const int no_trans,
-    const scalar_t trans_std,
-    const int sample_per_part,
-    const int output_dim,
-    const int group_size,
-    const int part_size,
-    const int num_classes,
-    const int channels_each_class,
-    scalar_t *top_data,
-    scalar_t *top_count)
-{
-  CUDA_KERNEL_LOOP(index, count)
-  {
-    // The output is in order (n, ctop, ph, pw)
-    int pw = index % pooled_width;
-    int ph = (index / pooled_width) % pooled_height;
-    int ctop = (index / pooled_width / pooled_height) % output_dim;
-    int n = index / pooled_width / pooled_height / output_dim;
-
-    // [start, end) interval for spatial sampling
-    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
-    int roi_batch_ind = offset_bottom_rois[0];
-    scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
-    scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
-    scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
-    scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
-
-    // Force too small ROIs to be 1x1
-    scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
-    scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);
-
-    // Compute w and h at bottom
-    scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height);
-    scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width);
-
-    scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part);
-    scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part);
-
-    int part_h = floor((scalar_t)(ph) / pooled_height * part_size);
-    int part_w = floor((scalar_t)(pw) / pooled_width * part_size);
-    int class_id = ctop / channels_each_class;
-    scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;
-    scalar_t trans_y = no_trans ?
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; - - scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; - wstart += trans_x * roi_width; - scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; - hstart += trans_y * roi_height; - - scalar_t sum = 0; - int count = 0; - int gw = floor((scalar_t)(pw)*group_size / pooled_width); - int gh = floor((scalar_t)(ph)*group_size / pooled_height); - gw = min(max(gw, 0), group_size - 1); - gh = min(max(gh, 0), group_size - 1); - - const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; - for (int ih = 0; ih < sample_per_part; ih++) - { - for (int iw = 0; iw < sample_per_part; iw++) - { - scalar_t w = wstart + iw * sub_bin_size_w; - scalar_t h = hstart + ih * sub_bin_size_h; - // bilinear interpolation - if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) - { - continue; - } - w = min(max(w, 0.), width - 1.); - h = min(max(h, 0.), height - 1.); - int c = (ctop * group_size + gh) * group_size + gw; - scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height); - sum += val; - count++; - } - } - top_data[index] = count == 0 ? (scalar_t)(0) : sum / count; - top_count[index] = count; - } -} - -template -__global__ void DeformablePSROIPoolBackwardAccKernel( - const int count, - const scalar_t *top_diff, - const scalar_t *top_count, - const int num_rois, - const scalar_t spatial_scale, - const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int output_dim, - scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff, - const scalar_t *bottom_data, - const scalar_t *bottom_rois, - const scalar_t *bottom_trans, - const int no_trans, - const scalar_t trans_std, - const int sample_per_part, - const int group_size, - const int part_size, - const int num_classes, - const int channels_each_class) -{ - CUDA_KERNEL_LOOP(index, count) - { - // The output is in order (n, ctop, ph, pw) - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int ctop = (index / pooled_width / pooled_height) % output_dim; - int n = index / pooled_width / pooled_height / output_dim; - - // [start, end) interval for spatial sampling - const scalar_t *offset_bottom_rois = bottom_rois + n * 5; - int roi_batch_ind = offset_bottom_rois[0]; - scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; - scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; - scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; - scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; - - // Force too small ROIs to be 1x1 - scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 - scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); - - // Compute w and h at bottom - scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); - scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); - - scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); - scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); - - int part_h = floor((scalar_t)(ph) / pooled_height * part_size); - int part_w = floor((scalar_t)(pw) / pooled_width * part_size); - int class_id = ctop / channels_each_class; - scalar_t trans_x = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; - scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; - - scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; - wstart += trans_x * roi_width; - scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; - hstart += trans_y * roi_height; - - if (top_count[index] <= 0) - { - continue; - } - scalar_t diff_val = top_diff[index] / top_count[index]; - const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; - scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; - int gw = floor((scalar_t)(pw)*group_size / pooled_width); - int gh = floor((scalar_t)(ph)*group_size / pooled_height); - gw = min(max(gw, 0), group_size - 1); - gh = min(max(gh, 0), group_size - 1); - - for (int ih = 0; ih < sample_per_part; ih++) - { - for (int iw = 0; iw < sample_per_part; iw++) - { - scalar_t w = wstart + iw * sub_bin_size_w; - scalar_t h = hstart + ih * sub_bin_size_h; - // bilinear interpolation - if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) - { - continue; - } - w = min(max(w, 0.), width - 1.); - h = min(max(h, 0.), height - 1.); - int c = (ctop * group_size + gh) * group_size + gw; - // backward on feature - int x0 = floor(w); - int x1 = ceil(w); - int y0 = floor(h); - int y1 = ceil(h); - scalar_t dist_x = w - x0, dist_y = h - y0; - scalar_t q00 = (1 - dist_x) * (1 - dist_y); - scalar_t q01 = (1 - dist_x) * dist_y; - scalar_t q10 = dist_x * (1 - dist_y); - scalar_t q11 = dist_x * dist_y; - int bottom_index_base = c * height * width; - atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); - atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); - atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); - atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); - - if (no_trans) - { - continue; - } - scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; - scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; - scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; - scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; - scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; - diff_x *= roi_width; - scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; - diff_y *= roi_height; - - atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); - atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); - } - } - } -} - -void DeformablePSROIPoolForward(const at::Tensor data, - const at::Tensor bbox, - const at::Tensor trans, - at::Tensor out, - at::Tensor top_count, - const int batch, - const int channels, - const int height, - const int width, - const int num_bbox, - const int channels_trans, - const int no_trans, - const float spatial_scale, - const int output_dim, - const int group_size, - const int pooled_size, - const int part_size, - const int sample_per_part, - const float trans_std) -{ - const int pooled_height = 
pooled_size; - const int pooled_width = pooled_size; - const int count = num_bbox * output_dim * pooled_height * pooled_width; - const int num_classes = no_trans ? 1 : channels_trans / 2; - const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; - - AT_DISPATCH_FLOATING_TYPES_AND_HALF( - data.scalar_type(), "deformable_psroi_pool_forward", ([&] { - const scalar_t *bottom_data = data.data(); - const scalar_t *bottom_rois = bbox.data(); - const scalar_t *bottom_trans = no_trans ? NULL : trans.data(); - scalar_t *top_data = out.data(); - scalar_t *top_count_data = top_count.data(); - - DeformablePSROIPoolForwardKernel<<>>( - count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width, - bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim, - group_size, part_size, num_classes, channels_each_class, top_data, top_count_data); - })); - - cudaError_t err = cudaGetLastError(); - if (err != cudaSuccess) - { - printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); - } -} - -void DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad, - const at::Tensor data, - const at::Tensor bbox, - const at::Tensor trans, - const at::Tensor top_count, - at::Tensor in_grad, - at::Tensor trans_grad, - const int batch, - const int channels, - const int height, - const int width, - const int num_bbox, - const int channels_trans, - const int no_trans, - const float spatial_scale, - const int output_dim, - const int group_size, - const int pooled_size, - const int part_size, - const int sample_per_part, - const float trans_std) -{ - // LOG(INFO) << "DeformablePSROIPoolBackward"; - const int num_rois = num_bbox; - const int pooled_height = pooled_size; - const int pooled_width = pooled_size; - const int count = num_bbox * output_dim * pooled_height * pooled_width; - const int num_classes = no_trans ? 1 : channels_trans / 2; - const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; - - AT_DISPATCH_FLOATING_TYPES_AND_HALF( - out_grad.scalar_type(), "deformable_psroi_pool_backward_acc", ([&] { - const scalar_t *top_diff = out_grad.data(); - const scalar_t *bottom_data = data.data(); - const scalar_t *bottom_rois = bbox.data(); - const scalar_t *bottom_trans = no_trans ? NULL : trans.data(); - scalar_t *bottom_data_diff = in_grad.data(); - scalar_t *bottom_trans_diff = no_trans ? 
NULL : trans_grad.data(); - const scalar_t *top_count_data = top_count.data(); - - DeformablePSROIPoolBackwardAccKernel<<>>( - count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width, - pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff, - bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, - group_size, part_size, num_classes, channels_each_class); - })); - - cudaError_t err = cudaGetLastError(); - if (err != cudaSuccess) - { - printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); - } -} \ No newline at end of file diff --git a/tracker/sot/lib/models/mask.py b/tracker/sot/lib/models/mask.py deleted file mode 100644 index 59a327c..0000000 --- a/tracker/sot/lib/models/mask.py +++ /dev/null @@ -1,342 +0,0 @@ -import math -import torch -import torch.nn as nn -import torch.nn.functional as F -from .connect import xcorr_depthwise - -class ARN(nn.Module): - """ - Attention Retrieval Network in Ocean+ - """ - def __init__(self, inchannels=256, outchannels=256): - super(ARN, self).__init__() - self.s_embed = nn.Conv2d(inchannels, outchannels, 1) # embedding for search feature - self.t_embed = nn.Conv2d(inchannels, outchannels, 1) # embeeding for template feature - - def forward(self, xf, zf, zf_mask): - # xf: [B, C, H, W] - # zf: [B, C, H, W] - # zf_mask: [B, H, W] - # pdb.set_trace() - xf = self.s_embed(xf) - zf = self.t_embed(zf) - - B, C, Hx, Wx = xf.size() - B, C, Hz, Wz = zf.size() - - xf = xf.permute(0, 2, 3, 1).contiguous() # [B, H, W, C] - xf = xf.view(B, -1, C) # [B, H*W, C] - zf = zf.view(B, C, -1) # [B, C, H*W] - - att = torch.matmul(xf, zf) # [HW, HW] - att = att / math.sqrt(C) - att = F.softmax(att, dim=-1) # [HW, HW] - zf_mask = nn.Upsample(size=(Hz, Wz), mode='bilinear', align_corners=True)(zf_mask.unsqueeze(1)) - # zf_mask = (zf_mask > 0.5).float() - zf_mask = zf_mask.view(B, -1, 1) - - arn = torch.matmul(att, zf_mask) # [B, H*W] - arn = arn.view(B, Hx, Hx).unsqueeze(1) - return arn - -class MSS(nn.Module): - """ - Multi-resolution Single-stage Segmentation (fast, used for VOT-RT) - """ - - def __init__(self): - super(MSS, self).__init__() - # BACKBONE - self.b4 = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU()) - self.b3 = nn.Sequential(nn.Conv2d(512, 32, 3, padding=1), nn.ReLU()) - self.b2 = nn.Sequential(nn.Conv2d(256, 16, 3, padding=1), nn.ReLU()) - self.b1 = nn.Sequential(nn.Conv2d(64, 4, 3, padding=1), nn.ReLU()) - self.b0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1), nn.ReLU()) - - # REFINE - self.rCo = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU()) - self.r3 = nn.Sequential(nn.Conv2d(64, 32, 3, padding=1), nn.ReLU()) - self.r2 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU()) - self.r1 = nn.Sequential(nn.Conv2d(16, 4, 3, padding=1), nn.ReLU()) - self.r0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) - - # multi refine layer - self.m3 = nn.Sequential(nn.Conv2d(32, 2, 3, padding=1)) # stride = 8 - self.m2 = nn.Sequential(nn.Conv2d(16, 2, 3, padding=1)) # stride = 4 - self.m1 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) # stride = 2 - - self.multi_loss = True - - # for sequential - self.sequential = ARN(256, 64) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_uniform_(m.weight, a=1) - - def forward(self, features, input_size=None, zf_ori=None, template_mask=None): - b1, b2, b3, b4, corr = features - - b4_size = b4.size()[2:] - b3_size = b3.size()[2:] - b2_size = b2.size()[2:] - b1_size = 
b1.size()[2:] - if input_size is None: input_size = (255, 255) - - # prepare for sequential model - arn = self.sequential(b4, zf_ori, template_mask) # [B, H, W] - arn = torch.clamp(arn, 0, 1) - b4 = b4 + arn - - corr = nn.Upsample(size=b4_size, mode='bilinear', align_corners=True)(corr) - r4 = self.rCo(corr) + self.b4(b4) - - r4 = nn.Upsample(size=b3_size, mode='bilinear', align_corners=True)(r4) - r3 = self.r3(r4) + self.b3(b3) - - # r3up + b2 - r3 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(r3) - r2 = self.r2(r3) + self.b2(b2) - - - # r2up + b1 - r2 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(r2) - r1 = self.r1(r2) + self.b1(b1) - - # r1 up - r1 = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)(r1) - mask = self.r0(r1) - - return mask - -class MMS(nn.Module): - def __init__(self): - """ - Multi-resolution Multi-stage Segmentation (suitable for VOS) - """ - super(MMS, self).__init__() - # BACKBONE - self.b4 = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU()) - self.b3 = nn.Sequential(nn.Conv2d(512, 32, 3, padding=1), nn.ReLU()) - self.b2 = nn.Sequential(nn.Conv2d(256, 16, 3, padding=1), nn.ReLU()) - self.b1 = nn.Sequential(nn.Conv2d(64, 4, 3, padding=1), nn.ReLU()) - self.b0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1), nn.ReLU()) - - # REFINE - self.rCo = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU()) - self.r3 = nn.Sequential(nn.Conv2d(64, 32, 3, padding=1), nn.ReLU()) - self.r2 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU()) - self.r1 = nn.Sequential(nn.Conv2d(16, 4, 3, padding=1), nn.ReLU()) - self.r0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) - - # being used in training (not inference) - self.m3 = nn.Sequential(nn.Conv2d(32, 2, 3, padding=1)) # stride = 8 - self.m2 = nn.Sequential(nn.Conv2d(16, 2, 3, padding=1)) # stride = 4 - self.m1 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) # stride = 2 - - # for sequential - self.sequential = ARN(256, 64) # transduction attention - self.iter = IterRefine() - self.iter2 = IterRefine2() - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_uniform_(m.weight, a=1) - - self.ratio1, self.ratio2, self.ratio3 = 0.33, 0.33, 0.33 - def forward(self, features, input_size=None, zf_ori=None, template_mask=None): - b1, b2, b3, b4, corr = features - - b4_size = b4.size()[2:] - b3_size = b3.size()[2:] - b2_size = b2.size()[2:] - b1_size = b1.size()[2:] - if input_size is None: input_size = (255, 255) - - # iter list -- return for iter list - iterList = [] - - # transduction network - arn = self.sequential(b4, zf_ori, template_mask) # [B, H, W] - arn = torch.clamp(arn, 0, 1) - b4 = b4 + arn - - corr = nn.Upsample(size=b4_size, mode='bilinear', align_corners=True)(corr) - corr = self.rCo(corr) - b4 = self.b4(b4) - r4 = corr + b4 - iterList.append(r4) # [64] - - b3 = self.b3(b3) - iterList.append(b3) # [64, 32] - - r3 = self.r3(r4) + b3 - r3 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(r3) - - b2 = self.b2(b2) # [64, 32, 16] - iterList.append(b2) # [64, 32, 16] - r2 = self.r2(r3) + b2 - r2 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(r2) - - b1 = self.b1(b1) # [64, 32, 16, 4] - iterList.append(b1) - r1 = self.r1(r2) + b1 - - # r1 up - r1 = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)(r1) - mask = self.r0(r1) - - ##### iter refine - mask_iter, flist = self.iter(iterList, pre_mask = mask, input_size=input_size) - mask_list = self.iter2(flist, pre_mask=mask_iter, 
input_size=input_size) - - return self.ratio1 * mask + self.ratio2 * mask_iter[0] + self.ratio3 * mask_list - - def update_iter(self, ratio1, ratio2, ratio3): - self.ratio1, self.ratio2, self.ratio3 = ratio1, ratio2, ratio3 - - -class IterRefine(nn.Module): - def __init__(self): - """ - stage2 of MMS - TODO: simplify the code - """ - super(IterRefine, self).__init__() - # BACKBONE - self.b3 = nn.Sequential(nn.Conv2d(32, 32, 3, padding=1), nn.ReLU()) - self.b2 = nn.Sequential(nn.Conv2d(16, 16, 3, padding=1), nn.ReLU()) - self.b1 = nn.Sequential(nn.Conv2d(4, 4, 3, padding=1), nn.ReLU()) - - # REFINE - self.r4 = nn.Sequential(nn.Conv2d(64, 32, 3, padding=1), nn.ReLU()) - self.r3 = nn.Sequential(nn.Conv2d(32, 32, 3, padding=1), nn.ReLU()) - self.r2 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU()) - self.r1 = nn.Sequential(nn.Conv2d(16, 4, 3, padding=1), nn.ReLU()) - self.r0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) - - # being used in training (not inference) - self.m3 = nn.Sequential(nn.Conv2d(32, 2, 3, padding=1)) # stride = 8 - self.m2 = nn.Sequential(nn.Conv2d(16, 2, 3, padding=1)) # stride = 4 - self.m1 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) # stride = 2 - - self.sequential = ARN(256, 64) # transduction attention - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_uniform_(m.weight, a=1) - - def forward(self, fList, pre_mask=None, input_size=None): - b4, b3, b2, b1 = fList # [64, 32, 16, 4] - iterList = [] - - # pre_mask processing - att = F.softmax(pre_mask, dim=1)[:,1,...].unsqueeze(1) - att = torch.clamp(att, 0.7, 1) - - b4_size = b4.size()[2:] - b3_size = b3.size()[2:] - b2_size = b2.size()[2:] - b1_size = b1.size()[2:] - if input_size is None: input_size = (255, 255) - - att_b4 = nn.Upsample(size=b4_size, mode='bilinear', align_corners=True)(att) - att_b2 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(att) - att_b1 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(att) - b4, b3, b2, b1 = att_b4 * b4, att_b4 * b3, att_b2 * b2, att_b1 * b1 - - r4 = self.r4(b4) - iterList.append(r4) - - b3 = self.b3(b3) - iterList.append(b3) - r3 = self.r3(r4) + b3 # 32 - - # r3up + b2 - r3 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(r3) - b2 = self.b2(b2) - iterList.append(b2) - r2 = self.r2(r3) + b2 # 16 - - # r2up + b1 - r2 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(r2) - b1 = self.b1(b1) - iterList.append(b1) - r1 = self.r1(r2) + b1 - - - # r1 up - r1 = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)(r1) - mask = self.r0(r1) - - return [mask], iterList - - -class IterRefine2(nn.Module): - def __init__(self): - """ - stage3 of MMS - TODO: simplify the code - """ - super(IterRefine2, self).__init__() - # BACKBONE - self.b3 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU()) - self.b2 = nn.Sequential(nn.Conv2d(16, 8, 3, padding=1), nn.ReLU()) - self.b1 = nn.Sequential(nn.Conv2d(4, 4, 3, padding=1), nn.ReLU()) - - # REFINE - self.r4 = nn.Sequential(nn.Conv2d(32, 16, 3, padding=1), nn.ReLU()) - self.r3 = nn.Sequential(nn.Conv2d(16, 16, 3, padding=1), nn.ReLU()) - self.r2 = nn.Sequential(nn.Conv2d(16, 8, 3, padding=1), nn.ReLU()) - self.r1 = nn.Sequential(nn.Conv2d(8, 4, 3, padding=1), nn.ReLU()) - self.r0 = nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) - - # being used in training (not inference) - self.m3 = nn.Sequential(nn.Conv2d(16, 2, 3, padding=1)) # stride = 8 - self.m2 = nn.Sequential(nn.Conv2d(8, 2, 3, padding=1)) # stride = 4 - self.m1 = 
nn.Sequential(nn.Conv2d(4, 2, 3, padding=1)) # stride = 2 - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_uniform_(m.weight, a=1) - - def forward(self, fList, pre_mask=None, input_size=None): - b4, b3, b2, b1 = fList # [32, 32, 16, 4] - iterList = [] - # pre_mask processing - att = F.softmax(pre_mask[0], dim=1)[:, 1, ...].unsqueeze(1) - att = torch.clamp(att, 0.7, 1) - - b4_size = b4.size()[2:] - b3_size = b3.size()[2:] - b2_size = b2.size()[2:] - b1_size = b1.size()[2:] - if input_size is None: input_size = (255, 255) - - att_b4 = nn.Upsample(size=b4_size, mode='bilinear', align_corners=True)(att) - att_b2 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(att) - att_b1 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(att) - b4, b3, b2, b1 = att_b4 * b4, att_b4 * b3, att_b2 * b2, att_b1 * b1 - - r4 = self.r4(b4) - b3 = self.b3(b3) - r3 = self.r3(r4) + b3 # 16 - - # r3up + b2 - r3 = nn.Upsample(size=b2_size, mode='bilinear', align_corners=True)(r3) - b2 = self.b2(b2) - r2 = self.r2(r3) + b2 # 8 - - # r2up + b1 - r2 = nn.Upsample(size=b1_size, mode='bilinear', align_corners=True)(r2) - b1 = self.b1(b1) - r1 = self.r1(r2) + b1 - - # r1 up - r1 = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)(r1) - mask = self.r0(r1) - - return mask - - - diff --git a/tracker/sot/lib/models/ocean.py b/tracker/sot/lib/models/ocean.py deleted file mode 100644 index f4c9167..0000000 --- a/tracker/sot/lib/models/ocean.py +++ /dev/null @@ -1,354 +0,0 @@ -# ------------------------------------------------------------------------------ -# Copyright (c) Microsoft -# Licensed under the MIT License. -# Written by Zhipeng Zhang (zhangzhipeng2017@ia.ac.cn) -# ------------------------------------------------------------------------------ -import torch -import torch.nn as nn -import numpy as np - -class Ocean_(nn.Module): - def __init__(self): - super(Ocean_, self).__init__() - self.features = None - self.connect_model = None - self.align_head = None - self.zf = None - self.criterion = nn.BCEWithLogitsLoss() - self.neck = None - self.search_size = 255 - self.score_size = 25 - self.batch = 32 if self.training else 1 - - self.grids() - - def feature_extractor(self, x, online=False): - return self.features(x, online=online) - - def extract_for_online(self, x): - xf = self.feature_extractor(x, online=True) - return xf - - def connector(self, template_feature, search_feature): - pred_score = self.connect_model(template_feature, search_feature) - return pred_score - - def _cls_loss(self, pred, label, select): - if len(select.size()) == 0: return 0 - pred = torch.index_select(pred, 0, select) - label = torch.index_select(label, 0, select) - return self.criterion(pred, label) # the same as tf version - - def _weighted_BCE(self, pred, label): - pred = pred.view(-1) - label = label.view(-1) - pos = label.data.eq(1).nonzero().squeeze().cuda() - neg = label.data.eq(0).nonzero().squeeze().cuda() - - loss_pos = self._cls_loss(pred, label, pos) - loss_neg = self._cls_loss(pred, label, neg) - return loss_pos * 0.5 + loss_neg * 0.5 - - def _weighted_BCE_align(self, pred, label): - pred = pred.view(-1) - label = label.view(-1) - pos = label.data.eq(1).nonzero().squeeze().cuda() - neg = label.data.eq(0).nonzero().squeeze().cuda() - - loss_pos = self._cls_loss(pred, label, pos) - loss_neg = self._cls_loss(pred, label, neg) - - return loss_pos * 0.5 + loss_neg * 0.5 - - def _IOULoss(self, pred, target, weight=None): - pred_left = pred[:, 0] - pred_top = pred[:, 1] - 
pred_right = pred[:, 2] - pred_bottom = pred[:, 3] - - target_left = target[:, 0] - target_top = target[:, 1] - target_right = target[:, 2] - target_bottom = target[:, 3] - - target_area = (target_left + target_right) * (target_top + target_bottom) - pred_area = (pred_left + pred_right) * (pred_top + pred_bottom) - - w_intersect = torch.min(pred_left, target_left) + torch.min(pred_right, target_right) - h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top) - - area_intersect = w_intersect * h_intersect - area_union = target_area + pred_area - area_intersect - - losses = -torch.log((area_intersect + 1.0) / (area_union + 1.0)) - - if weight is not None and weight.sum() > 0: - return (losses * weight).sum() / weight.sum() - else: - assert losses.numel() != 0 - return losses.mean() - - def add_iouloss(self, bbox_pred, reg_target, reg_weight): - """ - - :param bbox_pred: - :param reg_target: - :param reg_weight: - :param grid_x: used to get real target bbox - :param grid_y: used to get real target bbox - :return: - """ - - bbox_pred_flatten = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - reg_target_flatten = reg_target.reshape(-1, 4) - reg_weight_flatten = reg_weight.reshape(-1) - pos_inds = torch.nonzero(reg_weight_flatten > 0).squeeze(1) - - bbox_pred_flatten = bbox_pred_flatten[pos_inds] - reg_target_flatten = reg_target_flatten[pos_inds] - - loss = self._IOULoss(bbox_pred_flatten, reg_target_flatten) - - return loss - - # --------------------- - # classification align - # --------------------- - def grids(self): - """ - each element of feature map on input search image - :return: H*W*2 (position for each element) - """ - sz = self.score_size - stride = 8 - - sz_x = sz // 2 - sz_y = sz // 2 - - x, y = np.meshgrid(np.arange(0, sz) - np.floor(float(sz_x)), - np.arange(0, sz) - np.floor(float(sz_y))) - - self.grid_to_search = {} - self.grid_to_search_x = x * stride + self.search_size // 2 - self.grid_to_search_y = y * stride + self.search_size // 2 - - self.grid_to_search_x = torch.Tensor(self.grid_to_search_x).unsqueeze(0).unsqueeze(0).cuda() - self.grid_to_search_y = torch.Tensor(self.grid_to_search_y).unsqueeze(0).unsqueeze(0).cuda() - - self.grid_to_search_x = self.grid_to_search_x.repeat(self.batch, 1, 1, 1) - self.grid_to_search_y = self.grid_to_search_y.repeat(self.batch, 1, 1, 1) - - def pred_to_image(self, bbox_pred): - self.grid_to_search_x = self.grid_to_search_x.to(bbox_pred.device) - self.grid_to_search_y = self.grid_to_search_y.to(bbox_pred.device) - - pred_x1 = self.grid_to_search_x - bbox_pred[:, 0, ...].unsqueeze(1) # 17*17 - pred_y1 = self.grid_to_search_y - bbox_pred[:, 1, ...].unsqueeze(1) # 17*17 - pred_x2 = self.grid_to_search_x + bbox_pred[:, 2, ...].unsqueeze(1) # 17*17 - pred_y2 = self.grid_to_search_y + bbox_pred[:, 3, ...].unsqueeze(1) # 17*17 - - pred = [pred_x1, pred_y1, pred_x2, pred_y2] - - pred = torch.cat(pred, dim=1) - - return pred - - - def align_label(self, pred, target, weight): - # calc predicted box iou (treat it as aligned label) - - pred = pred.permute(0, 2, 3, 1) # [B, 25, 25, 4] - pred_left = pred[..., 0] - pred_top = pred[..., 1] - pred_right = pred[..., 2] - pred_bottom = pred[..., 3] - - target_left = target[..., 0] - target_top = target[..., 1] - target_right = target[..., 2] - target_bottom = target[..., 3] - - target_area = (target_left + target_right) * (target_top + target_bottom) - pred_area = (pred_left + pred_right) * (pred_top + pred_bottom) - - w_intersect = torch.min(pred_left, target_left) + 
-        h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top)
-
-        area_intersect = w_intersect * h_intersect
-        area_union = target_area + pred_area - area_intersect
-
-        ious = (area_intersect + 1.0) / (area_union + 1.0)
-
-        ious = torch.abs(weight * ious)  # delete points out of object
-
-        ious[ious < 0] = 0
-        ious[ious >= 1] = 1
-
-        return ious
-
-    def offset(self, boxes, featmap_sizes):
-        """
-        refers to Cascade RPN
-        Params:
-            boxes: [N, 4] [x1, y1, x2, y2]  # predicted bbox
-        """
-
-        def _shape_offset(boxes, stride):
-            ks = 3
-            dilation = 1
-            pad = (ks - 1) // 2
-            idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)
-            yy, xx = torch.meshgrid(idx, idx)  # return order matters
-            xx = xx.reshape(-1)
-            yy = yy.reshape(-1)
-            w = (boxes[:, 2] - boxes[:, 0] + 1) / stride
-            h = (boxes[:, 3] - boxes[:, 1] + 1) / stride
-            w = w / (ks - 1) - dilation
-            h = h / (ks - 1) - dilation
-            offset_x = w[:, None] * xx  # (NA, ks**2)
-            offset_y = h[:, None] * yy  # (NA, ks**2)
-            return offset_x, offset_y
-
-        def _ctr_offset(boxes, stride, featmap_size):
-            feat_h, feat_w = featmap_size
-            image_size = self.search_size
-
-            assert len(boxes) == feat_h * feat_w
-
-            x = (boxes[:, 0] + boxes[:, 2]) * 0.5
-            y = (boxes[:, 1] + boxes[:, 3]) * 0.5
-
-            # # compute centers on feature map
-            # x = (x - (stride - 1) * 0.5) / stride
-            # y = (y - (stride - 1) * 0.5) / stride
-
-            # different here for Siamese:
-            # use center of image as coordinate origin
-            x = (x - image_size * 0.5) / stride + feat_w // 2
-            y = (y - image_size * 0.5) / stride + feat_h // 2
-
-            # compute predefined centers (different here for Siamese)
-            xx = torch.arange(0, feat_w, device=boxes.device)
-            yy = torch.arange(0, feat_h, device=boxes.device)
-            yy, xx = torch.meshgrid(yy, xx)
-            xx = xx.reshape(-1).type_as(x)
-            yy = yy.reshape(-1).type_as(y)
-
-            offset_x = x - xx  # (NA, )
-            offset_y = y - yy  # (NA, )
-            return offset_x, offset_y
-
-        num_imgs = len(boxes)
-        dtype = boxes[0].dtype
-        device = boxes[0][0].device
-
-        featmap_sizes = featmap_sizes[2:]
-
-        offset_list = []
-        for i in range(num_imgs):
-            c_offset_x, c_offset_y = _ctr_offset(boxes[i], 8, featmap_sizes)
-            s_offset_x, s_offset_y = _shape_offset(boxes[i], 8)
-
-            # offset = ctr_offset + shape_offset
-            offset_x = s_offset_x + c_offset_x[:, None]
-            offset_y = s_offset_y + c_offset_y[:, None]
-
-            # offset order: (y0, x0, y1, x1, ..., y8, x8) per position
-            offset = torch.stack([offset_y, offset_x], dim=-1)
-            offset = offset.reshape(offset.size(0), -1).unsqueeze(0)  # [NA, 2*ks**2]
-            offset_list.append(offset)
-
-        offsets = torch.cat(offset_list, 0)
-        return offsets
-
-    def template(self, z):
-        _, self.zf = self.feature_extractor(z)
-
-        if self.neck is not None:
-            _, self.zf = self.neck(self.zf, crop=True)
-
-        if self.align_head is not None:
-            self.update_flag = True
-        else:
-            pass
-
-    def track(self, x):
-
-        _, xf = self.feature_extractor(x)
-
-        if self.neck is not None:
-            xf = self.neck(xf)
-
-        if self.align_head is not None:
-            if self.update_flag:
-                self.batch = 1
-                self.search_size = x.size(-1)
-                self.score_size = (self.search_size - 127) // 8 + 1 + 8
-                self.grids()
-                self.update_flag = False
-
-            bbox_pred, cls_pred, cls_feature, reg_feature = self.connect_model(xf, self.zf)
-            bbox_pred_to_img = self.pred_to_image(bbox_pred)
-            offsets = self.offset(bbox_pred_to_img.permute(0, 2, 3, 1).reshape(bbox_pred_to_img.size(0), -1, 4), bbox_pred.size())
-            cls_align = self.align_head(reg_feature, offsets)
-
-            return cls_pred, bbox_pred, cls_align
-        else:
-            bbox_pred, cls_pred, _, _ = self.connect_model(xf, self.zf)
-
-            return cls_pred, bbox_pred
-
-    def forward(self, template, search, label=None, reg_target=None, reg_weight=None):
-        """
-        Training forward pass: returns the classification and regression losses.
-        """
-        _, zf = self.feature_extractor(template)
-        _, xf = self.feature_extractor(search)
-
-        if self.neck is not None:
-            _, zf = self.neck(zf, crop=True)
-            xf = self.neck(xf, crop=False)
-
-        # depth-wise cross correlation --> tower --> box pred
-        if self.align_head is not None:
-            bbox_pred, cls_pred, cls_feature, reg_feature = self.connect_model(xf, zf)
-
-            bbox_pred_to_img = self.pred_to_image(bbox_pred)
-            offsets = self.offset(bbox_pred_to_img.permute(0, 2, 3, 1).reshape(bbox_pred_to_img.size(0), -1, 4), bbox_pred.size())
-            cls_align = self.align_head(reg_feature, offsets)
-
-            # add iou loss
-            reg_loss = self.add_iouloss(bbox_pred, reg_target, reg_weight)
-
-            # add cls loss
-            align_cls_label = self.align_label(bbox_pred, reg_target, reg_weight)
-            cls_loss_ori = self._weighted_BCE(cls_pred, label)
-            cls_loss = self.criterion(cls_align.squeeze(), align_cls_label)
-
-            if torch.isnan(cls_loss):
-                cls_loss = 0 * cls_loss_ori
-
-            return cls_loss_ori, cls_loss, reg_loss
-        else:
-            bbox_pred, cls_pred, _, _ = self.connect_model(xf, zf)
-            reg_loss = self.add_iouloss(bbox_pred, reg_target, reg_weight)
-            cls_loss = self._weighted_BCE(cls_pred, label)
-            return cls_loss, None, reg_loss
-
-
-
-
-
-
-
-
-
-
diff --git a/tracker/sot/lib/models/oceanTRT.py b/tracker/sot/lib/models/oceanTRT.py
deleted file mode 100644
index 8378436..0000000
--- a/tracker/sot/lib/models/oceanTRT.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# ------------------------------------------------------------------------------
-# Copyright (c) Microsoft
-# Licensed under the MIT License.
-# Written by Zhipeng Zhang (zhangzhipeng2017@ia.ac.cn) -# ------------------------------------------------------------------------------ - -import torch -import torch.nn as nn -import numpy as np - -class OceanTRT_(nn.Module): - def __init__(self): - super(OceanTRT_, self).__init__() - self.features = None - self.connect_model = None - self.zf = None # for online tracking - self.neck = None - self.search_size = 255 - self.score_size = 25 - - - def tensorrt_init(self, trt_net, corr=None): - """ - TensorRT init - """ - self.t_backbone255, self.s_backbone_siam255, self.s_backbone_siam287, self.s_backbone_online, self.t_neck255, \ - self.s_neck255, self.s_neck287, self.multiDiCorr255, self.multiDiCorr287, self.boxtower255, self.boxtower287 = trt_net - - if corr: - self.multiDiCorr255, self.multiDiCorr287 = corr - - def extract_for_online(self, x): - xf = self.s_backbone_online(x, torch.Tensor([1]).cuda()) - return xf - - def template(self, z): - _, _, _, self.zf = self.t_backbone255(z, torch.Tensor([]).cuda()) - self.zf_ori = self.t_neck255(self.zf) - self.zf = self.zf_ori[:, :, 4:-4, 4:-4].contiguous() - - def track(self, x): - """ - Please see OceanOnlinePT for pytorch version (more clean) - """ - b1, b2, b3, xf = self.s_backbone_siam255(x, torch.Tensor([]).cuda()) - xf = self.s_neck255(xf) # b4 - - # backbone encode (something is wrong with connect model) - cls_z0, cls_z1, cls_z2, cls_x0, cls_x1, cls_x2, reg_z0, reg_z1, reg_z2, reg_x0, reg_x1, reg_x2 = self.multiDiCorr255(xf, self.zf) - - # correlation - cls_z = [cls_z0, cls_z1, cls_z2] - cls_x = [cls_x0, cls_x1, cls_x2] - reg_z = [reg_z0, reg_z1, reg_z2] - reg_x = [reg_x0, reg_x1, reg_x2] - - cls_dw, reg_dw = self.connect_model2(cls_z, cls_x, reg_z, reg_x) - # cls and reg - bbox_pred, cls_pred = self.boxtower255(cls_dw, reg_dw) - - return cls_pred, bbox_pred.squeeze(0) diff --git a/tracker/sot/lib/models/oceanplus.py b/tracker/sot/lib/models/oceanplus.py deleted file mode 100644 index e40be7e..0000000 --- a/tracker/sot/lib/models/oceanplus.py +++ /dev/null @@ -1,106 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np -import torch.nn.functional as F - -class OceanPlus_(nn.Module): - def __init__(self): - super(OceanPlus_, self).__init__() - self.features = None - self.connect_model = None - self.mask_model = None - self.zf = None - self.criterion = nn.BCEWithLogitsLoss() - self.neck = None - self.search_size = 255 - self.score_size = 25 - self.batch = 32 if self.training else 1 - self.lambda_u = 0.1 - self.lambda_s = 0.2 - - # self.grids() - - def feature_extractor(self, x, online=False): - return self.features(x, online=online) - - def extract_for_online(self, x): - xf = self.feature_extractor(x, online=True) - return xf - - def connector(self, template_feature, search_feature): - pred_score = self.connect_model(template_feature, search_feature) - return pred_score - - def update_roi_template(self, target_pos, target_sz, score): - """ - :param target_pos: pos in search (not the original) - :param target_sz: size in target size - :param score: - :return: - """ - - lambda_u = self.lambda_u * float(score) - lambda_s = self.lambda_s - N, C, H, W = self.search_size - stride = 8 - assert N == 1, "not supported" - l = W // 2 - x = range(-l, l + 1) - y = range(-l, l + 1) - - hc_z = (target_sz[1] + 0.3 * sum(target_sz)) / stride - wc_z = (target_sz[0] + 0.3 * sum(target_sz)) / stride - grid_x = np.linspace(- wc_z / 2, wc_z / 2, 17) - grid_y = np.linspace(- hc_z / 2, hc_z / 2, 17) - grid_x = grid_x[5:-5] + target_pos[0] / stride - 
grid_y = grid_y[5:-5] + target_pos[1] / stride - x_offset = grid_x / l - y_offset = grid_y / l - - grid = np.reshape(np.transpose([np.tile(x_offset, len(y_offset)), np.repeat(y_offset, len(x_offset))]), (len(grid_y), len(grid_x), 2)) - grid = torch.from_numpy(grid).unsqueeze(0).cuda() - - zmap = nn.functional.grid_sample(self.xf.double(), grid).float() - # cls_kernel = self.rpn.cls.make_kernel(zmap) - self.MA_kernel = (1 - lambda_u) * self.MA_kernel + lambda_u * zmap - self.zf_update = self.zf * lambda_s + self.MA_kernel * (1.0 - lambda_s) - - def template(self, z, template_mask = None): - _, self.zf = self.feature_extractor(z) - - if self.neck is not None: - self.zf_ori, self.zf = self.neck(self.zf, crop=True) - - self.template_mask = template_mask.float() - self.MA_kernel = self.zf.detach() - self.zf_update = None - - - def track(self, x): - - features_stages, xf = self.feature_extractor(x) - - if self.neck is not None: - xf = self.neck(xf, crop=False) - - features_stages.append(xf) - bbox_pred, cls_pred, cls_feature, reg_feature = self.connect_model(xf, self.zf, update=self.zf_update) - - features_stages.append(cls_feature) - pred_mask = self.mask_model(features_stages, input_size=x.size()[2:], zf_ori=self.zf_ori, template_mask=self.template_mask) - self.search_size = xf.size() - self.xf = xf.detach() - - return cls_pred, bbox_pred, pred_mask - - - - - - - - - - - - diff --git a/tracker/sot/lib/models/siamfc.py b/tracker/sot/lib/models/siamfc.py index d184ddf..20e89e6 100644 --- a/tracker/sot/lib/models/siamfc.py +++ b/tracker/sot/lib/models/siamfc.py @@ -30,13 +30,6 @@ def forward(self, x): return response def update(self, z, lr=0): - #outs = [] - #def h(module, inp, out): - # outs.append(out.data.cpu().numpy()) - #self.features.conv1.register_forward_hook(h) - #self.features.bn1.register_forward_hook(h) - #self.features.maxpool.register_forward_hook(h) - #pdb.set_trace() zf = self.feature_extractor(z).detach() _, _, ts, ts = zf.shape