Cyril666 committed
Commit da7a010 · 1 Parent(s): 94a8b7c

First model version

maskrcnn_benchmark/csrc/cpu/dcn_v2_cpu.cpp CHANGED
@@ -1,74 +1,229 @@
  #include <vector>

  #include <ATen/ATen.h>
- #include <ATen/cuda/CUDAContext.h>


  at::Tensor
  dcn_v2_cpu_forward(const at::Tensor &input,
- const at::Tensor &weight,
- const at::Tensor &bias,
- const at::Tensor &offset,
- const at::Tensor &mask,
- const int kernel_h,
- const int kernel_w,
- const int stride_h,
- const int stride_w,
- const int pad_h,
- const int pad_w,
- const int dilation_h,
- const int dilation_w,
- const int deformable_group)
- {
- AT_ERROR("Not implement on cpu");
- }
-
- std::vector<at::Tensor>
- dcn_v2_cpu_backward(const at::Tensor &input,
  const at::Tensor &weight,
  const at::Tensor &bias,
  const at::Tensor &offset,
  const at::Tensor &mask,
- const at::Tensor &grad_output,
- int kernel_h, int kernel_w,
- int stride_h, int stride_w,
- int pad_h, int pad_w,
- int dilation_h, int dilation_w,
- int deformable_group)
  {
- AT_ERROR("Not implement on cpu");
- }

- std::tuple<at::Tensor, at::Tensor>
- dcn_v2_psroi_pooling_cpu_forward(const at::Tensor &input,
- const at::Tensor &bbox,
- const at::Tensor &trans,
- const int no_trans,
- const float spatial_scale,
- const int output_dim,
- const int group_size,
- const int pooled_size,
- const int part_size,
- const int sample_per_part,
- const float trans_std)
- {
- AT_ERROR("Not implement on cpu");
  }

- std::tuple<at::Tensor, at::Tensor>
- dcn_v2_psroi_pooling_cpu_backward(const at::Tensor &out_grad,
- const at::Tensor &input,
- const at::Tensor &bbox,
- const at::Tensor &trans,
- const at::Tensor &top_count,
- const int no_trans,
- const float spatial_scale,
- const int output_dim,
- const int group_size,
- const int pooled_size,
- const int part_size,
- const int sample_per_part,
- const float trans_std)
  {
- AT_ERROR("Not implement on cpu");
- }
1
  #include <vector>
2
+ #include "cpu/dcn_v2_im2col_cpu.h"
3
+ #include <iostream>
4
 
5
  #include <ATen/ATen.h>
6
+ //#include <ATen/cuda/CUDAContext.h>
7
 
8
+ #include <TH/TH.h>
9
+ //#include <THC/THCAtomics.cuh>
10
+ //#include <THC/THCDeviceUtils.cuh>
11
+
12
+ //extern THCState *state;
13
+
14
+ // author: Charles Shang
15
+ // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
16
+
17
+ // modified from the CUDA version for CPU use by Daniel K. Suhendro
18
+
19
+ // edit by: James Bockman and Matthew Howe
20
+ // modified for torch implementation to remove use of deprecated torch access to Blas
21
 
22
  at::Tensor
23
  dcn_v2_cpu_forward(const at::Tensor &input,
24
  const at::Tensor &weight,
25
  const at::Tensor &bias,
26
  const at::Tensor &offset,
27
  const at::Tensor &mask,
28
+ const int kernel_h,
29
+ const int kernel_w,
30
+ const int stride_h,
31
+ const int stride_w,
32
+ const int pad_h,
33
+ const int pad_w,
34
+ const int dilation_h,
35
+ const int dilation_w,
36
+ const int deformable_group)
37
  {
38
+ // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
39
+ /*AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
40
+ AT_ASSERTM(weight.is_cuda(), "weight must be a CUDA tensor");
41
+ AT_ASSERTM(bias.is_cuda(), "bias must be a CUDA tensor");
42
+ AT_ASSERTM(offset.is_cuda(), "offset must be a CUDA tensor");
43
+ AT_ASSERTM(mask.is_cuda(), "mask must be a CUDA tensor");*/
44
 
45
+ const int batch = input.size(0);
46
+ const int channels = input.size(1);
47
+ const int height = input.size(2);
48
+ const int width = input.size(3);
49
+
50
+ const int channels_out = weight.size(0);
51
+ const int channels_kernel = weight.size(1);
52
+ const int kernel_h_ = weight.size(2);
53
+ const int kernel_w_ = weight.size(3);
54
+
55
+ // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
56
+ // printf("Channels: %d %d\n", channels, channels_kernel);
57
+ // printf("Channels: %d %d\n", channels_out, channels_kernel);
58
+
59
+ AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
60
+ "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
61
+
62
+ AT_ASSERTM(channels == channels_kernel,
63
+ "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel);
64
+
65
+ const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
66
+ const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
67
+
68
+ // auto ones = at::ones({height_out, width_out}, input.options());
69
+ auto ones = at::ones({bias.sizes()[0], height_out, width_out}, input.options());
70
+ auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
71
+ auto output = at::zeros({batch, channels_out, height_out, width_out}, input.options());
72
+
73
+ using scalar_t = float;
74
+ for (int b = 0; b < batch; b++)
75
+ {
76
+ auto input_n = input.select(0, b);
77
+ auto offset_n = offset.select(0, b);
78
+ auto mask_n = mask.select(0, b);
79
+ auto output_n = output.select(0, b);
80
+ // std::cout << "output_n: " << output_n << "output.select(0,b): " << output.select(0,b) << "\n";
81
+
82
+ // Do Bias first:
83
+ // M,N,K are dims of matrix A and B
84
+ // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
85
+ // (N x 1) (1 x M)
86
+
87
+ // torch implementation
88
+ auto ones_T = at::transpose(ones.contiguous(), 2, 0);
89
+ ones_T = at::mul(ones_T, bias.contiguous());
90
+ ones_T = at::transpose(ones_T, 2, 0);
91
+ output_n = at::add(output_n, ones_T);
92
+
93
+ modulated_deformable_im2col_cpu(input_n.data_ptr<scalar_t>(),
94
+ offset_n.data_ptr<scalar_t>(),
95
+ mask_n.data_ptr<scalar_t>(),
96
+ 1, channels, height, width,
97
+ height_out, width_out, kernel_h, kernel_w,
98
+ pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
99
+ deformable_group,
100
+ columns.data_ptr<scalar_t>());
101
+
102
+ //(k * m) x (m * n)
103
+ // Y = WC
104
+
105
+ // torch implementation
106
+ auto weight_flat = weight.view({channels_out, channels * kernel_h * kernel_w});
107
+ auto product = at::matmul(weight_flat, columns);
108
+ output.select(0, b) = at::add(output_n, product.view({channels_out, height_out, width_out}));
109
+ }
110
+ return output;
111
  }
112
 
113
+ std::vector<at::Tensor> dcn_v2_cpu_backward(const at::Tensor &input,
114
+ const at::Tensor &weight,
115
+ const at::Tensor &bias,
116
+ const at::Tensor &offset,
117
+ const at::Tensor &mask,
118
+ const at::Tensor &grad_output,
119
+ int kernel_h, int kernel_w,
120
+ int stride_h, int stride_w,
121
+ int pad_h, int pad_w,
122
+ int dilation_h, int dilation_w,
123
+ int deformable_group)
124
  {
125
+
126
+ THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
127
+ THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
128
+
129
+ /*AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
130
+ AT_ASSERTM(weight.is_cuda(), "weight must be a CUDA tensor");
131
+ AT_ASSERTM(bias.is_cuda(), "bias must be a CUDA tensor");
132
+ AT_ASSERTM(offset.is_cuda(), "offset must be a CUDA tensor");
133
+ AT_ASSERTM(mask.is_cuda(), "mask must be a CUDA tensor");*/
134
+
135
+ const int batch = input.size(0);
136
+ const int channels = input.size(1);
137
+ const int height = input.size(2);
138
+ const int width = input.size(3);
139
+
140
+ const int channels_out = weight.size(0);
141
+ const int channels_kernel = weight.size(1);
142
+ const int kernel_h_ = weight.size(2);
143
+ const int kernel_w_ = weight.size(3);
144
+
145
+ AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
146
+ "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
147
+
148
+ AT_ASSERTM(channels == channels_kernel,
149
+ "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel);
150
+
151
+ const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
152
+ const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
153
+
154
+ auto ones = at::ones({height_out, width_out}, input.options());
155
+ auto columns = at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
156
+ auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
157
+
158
+ auto grad_input = at::zeros_like(input);
159
+ auto grad_weight = at::zeros_like(weight);
160
+ auto grad_bias = at::zeros_like(bias);
161
+ auto grad_offset = at::zeros_like(offset);
162
+ auto grad_mask = at::zeros_like(mask);
163
+
164
+ using scalar_t = float;
165
+
166
+ for (int b = 0; b < batch; b++)
167
+ {
168
+ auto input_n = input.select(0, b);
169
+ auto offset_n = offset.select(0, b);
170
+ auto mask_n = mask.select(0, b);
171
+ auto grad_output_n = grad_output.select(0, b);
172
+ auto grad_input_n = grad_input.select(0, b);
173
+ auto grad_offset_n = grad_offset.select(0, b);
174
+ auto grad_mask_n = grad_mask.select(0, b);
175
+
176
+
177
+
178
+ // Torch implementation
179
+ auto weight_flat = weight.view({channels_out, channels*kernel_h*kernel_w});
180
+ weight_flat = at::transpose(weight_flat, 1, 0);
181
+ auto grad_output_n_flat = grad_output_n.view({channels_out, height_out*width_out});
182
+ columns = at::matmul(weight_flat, grad_output_n_flat);
183
+
184
+ // gradient w.r.t. input coordinate data
185
+ modulated_deformable_col2im_coord_cpu(columns.data_ptr<scalar_t>(),
186
+ input_n.data_ptr<scalar_t>(),
187
+ offset_n.data_ptr<scalar_t>(),
188
+ mask_n.data_ptr<scalar_t>(),
189
+ 1, channels, height, width,
190
+ height_out, width_out, kernel_h, kernel_w,
191
+ pad_h, pad_w, stride_h, stride_w,
192
+ dilation_h, dilation_w, deformable_group,
193
+ grad_offset_n.data_ptr<scalar_t>(),
194
+ grad_mask_n.data_ptr<scalar_t>());
195
+ // gradient w.r.t. input data
196
+ modulated_deformable_col2im_cpu(columns.data_ptr<scalar_t>(),
197
+ offset_n.data_ptr<scalar_t>(),
198
+ mask_n.data_ptr<scalar_t>(),
199
+ 1, channels, height, width,
200
+ height_out, width_out, kernel_h, kernel_w,
201
+ pad_h, pad_w, stride_h, stride_w,
202
+ dilation_h, dilation_w, deformable_group,
203
+ grad_input_n.data_ptr<scalar_t>());
204
+
205
+ // gradient w.r.t. weight, dWeight should accumulate across the batch and group
206
+ modulated_deformable_im2col_cpu(input_n.data_ptr<scalar_t>(),
207
+ offset_n.data_ptr<scalar_t>(),
208
+ mask_n.data_ptr<scalar_t>(),
209
+ 1, channels, height, width,
210
+ height_out, width_out, kernel_h, kernel_w,
211
+ pad_h, pad_w, stride_h, stride_w,
212
+ dilation_h, dilation_w, deformable_group,
213
+ columns.data_ptr<scalar_t>());
214
+
215
+ // Torch implementation
216
+ auto product = at::matmul(grad_output_n_flat, at::transpose(columns, 1, 0));
217
+ grad_weight = at::add(grad_weight, product.view({channels_out, channels, kernel_h, kernel_w}));
218
+
219
+
220
+ // Torch implementation
221
+ auto ones_flat = ones.view({height_out*width_out});
222
+ product = at::matmul(grad_output_n_flat, ones_flat);
223
+ grad_bias = at::add(grad_bias, product);
224
+ }
225
+
226
+ return {
227
+ grad_input, grad_offset, grad_mask, grad_weight, grad_bias
228
+ };
229
+ }
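
For reference, a minimal, hypothetical driver for the CPU forward path added above (not part of this commit): it assumes the translation unit is linked into a standalone binary with ATen available, and it uses a 3x3 kernel with stride 1 and padding 1 so the output keeps the input resolution. The offset and mask shapes follow the layout the kernels index into: 2*kh*kw*deformable_group and kh*kw*deformable_group channels at the output resolution.

// Hypothetical usage sketch (not part of the commit). Zero offsets and a mask of
// ones reduce the op to an ordinary convolution plus bias, which makes the output
// easy to sanity-check against at::conv2d.
#include <ATen/ATen.h>
#include <iostream>

at::Tensor dcn_v2_cpu_forward(const at::Tensor &input, const at::Tensor &weight,
                              const at::Tensor &bias, const at::Tensor &offset,
                              const at::Tensor &mask,
                              int kernel_h, int kernel_w, int stride_h, int stride_w,
                              int pad_h, int pad_w, int dilation_h, int dilation_w,
                              int deformable_group);

int main() {
    const int N = 2, C = 4, H = 8, W = 8, C_out = 6, k = 3, group = 1;
    auto input  = at::randn({N, C, H, W});                  // float32, as the kernels expect
    auto weight = at::randn({C_out, C, k, k});
    auto bias   = at::zeros({C_out});
    auto offset = at::zeros({N, group * 2 * k * k, H, W});  // zero offsets: regular sampling grid
    auto mask   = at::ones({N, group * k * k, H, W});       // mask of ones: no modulation
    auto out = dcn_v2_cpu_forward(input, weight, bias, offset, mask,
                                  k, k, /*stride*/ 1, 1, /*pad*/ 1, 1, /*dilation*/ 1, 1, group);
    std::cout << out.sizes() << std::endl;                  // expected: [2, 6, 8, 8]
    return 0;
}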
maskrcnn_benchmark/csrc/cpu/dcn_v2_im2col_cpu.cpp ADDED
@@ -0,0 +1,395 @@
1
+ #include "dcn_v2_im2col_cpu.h"
2
+ #include <cstdio>
3
+ #include <algorithm>
4
+ #include <cstring>
5
+
6
+ #include <ATen/ATen.h>
7
+ //#include <ATen/cuda/CUDAContext.h>
8
+
9
+ #include <TH/TH.h>
10
+ //#include <THC/THCAtomics.cuh>
11
+ //#include <THC/THCDeviceUtils.cuh>
12
+
13
+ // modified from the CUDA version for CPU use by Daniel K. Suhendro
14
+
15
+ /*#define CUDA_KERNEL_LOOP(i, n) \
16
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
17
+ i < (n); \
18
+ i += blockDim.x * gridDim.x)
19
+
20
+ const int CUDA_NUM_THREADS = 1024;
21
+ inline int GET_BLOCKS(const int N)
22
+ {
23
+ return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
24
+ }*/
25
+
26
+
27
+ float dmcn_im2col_bilinear_cpu(const float *bottom_data, const int data_width,
28
+ const int height, const int width, float h, float w)
29
+ {
30
+ int h_low = floor(h);
31
+ int w_low = floor(w);
32
+ int h_high = h_low + 1;
33
+ int w_high = w_low + 1;
34
+
35
+ float lh = h - h_low;
36
+ float lw = w - w_low;
37
+ float hh = 1 - lh, hw = 1 - lw;
38
+
39
+ float v1 = 0;
40
+ if (h_low >= 0 && w_low >= 0)
41
+ v1 = bottom_data[h_low * data_width + w_low];
42
+ float v2 = 0;
43
+ if (h_low >= 0 && w_high <= width - 1)
44
+ v2 = bottom_data[h_low * data_width + w_high];
45
+ float v3 = 0;
46
+ if (h_high <= height - 1 && w_low >= 0)
47
+ v3 = bottom_data[h_high * data_width + w_low];
48
+ float v4 = 0;
49
+ if (h_high <= height - 1 && w_high <= width - 1)
50
+ v4 = bottom_data[h_high * data_width + w_high];
51
+
52
+ float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
53
+
54
+ float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
55
+ return val;
56
+ }
57
+
58
+ float dmcn_get_gradient_weight_cpu(float argmax_h, float argmax_w,
59
+ const int h, const int w, const int height, const int width)
60
+ {
61
+ if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
62
+ {
63
+ //empty
64
+ return 0;
65
+ }
66
+
67
+ int argmax_h_low = floor(argmax_h);
68
+ int argmax_w_low = floor(argmax_w);
69
+ int argmax_h_high = argmax_h_low + 1;
70
+ int argmax_w_high = argmax_w_low + 1;
71
+
72
+ float weight = 0;
73
+ if (h == argmax_h_low && w == argmax_w_low)
74
+ weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
75
+ if (h == argmax_h_low && w == argmax_w_high)
76
+ weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
77
+ if (h == argmax_h_high && w == argmax_w_low)
78
+ weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
79
+ if (h == argmax_h_high && w == argmax_w_high)
80
+ weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
81
+ return weight;
82
+ }
83
+
84
+ float dmcn_get_coordinate_weight_cpu(float argmax_h, float argmax_w,
85
+ const int height, const int width, const float *im_data,
86
+ const int data_width, const int bp_dir)
87
+ {
88
+ if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
89
+ {
90
+ //empty
91
+ return 0;
92
+ }
93
+
94
+ int argmax_h_low = floor(argmax_h);
95
+ int argmax_w_low = floor(argmax_w);
96
+ int argmax_h_high = argmax_h_low + 1;
97
+ int argmax_w_high = argmax_w_low + 1;
98
+
99
+ float weight = 0;
100
+
101
+ if (bp_dir == 0)
102
+ {
103
+ if (argmax_h_low >= 0 && argmax_w_low >= 0)
104
+ weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
105
+ if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
106
+ weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
107
+ if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
108
+ weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
109
+ if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
110
+ weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
111
+ }
112
+ else if (bp_dir == 1)
113
+ {
114
+ if (argmax_h_low >= 0 && argmax_w_low >= 0)
115
+ weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
116
+ if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
117
+ weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
118
+ if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
119
+ weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
120
+ if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
121
+ weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
122
+ }
123
+
124
+ return weight;
125
+ }
126
+
127
+ void modulated_deformable_im2col_cpu_kernel(const int n, const float *data_im, const float *data_offset, const float *data_mask,
128
+ const int height, const int width, const int kernel_h, const int kernel_w,
129
+ const int pad_h, const int pad_w,
130
+ const int stride_h, const int stride_w,
131
+ const int dilation_h, const int dilation_w,
132
+ const int channel_per_deformable_group,
133
+ const int batch_size, const int num_channels, const int deformable_group,
134
+ const int height_col, const int width_col,
135
+ float *data_col)
136
+ {
137
+ // launch channels * batch_size * height_col * width_col cores
138
+ for(int index=0; index<n; index++)
139
+ {
140
+ // NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow)
141
+ // here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis
142
+
143
+ // index index of output matrix
144
+ const int w_col = index % width_col;
145
+ const int h_col = (index / width_col) % height_col;
146
+ // const int b_col = (index / width_col / height_col) % batch_size;
147
+ const int b_col = (index / width_col / height_col / num_channels) % batch_size;
148
+ // const int c_im = (index / width_col / height_col) / batch_size;
149
+ const int c_im = (index / width_col / height_col) % num_channels;
150
+ // const int c_col = c_im * kernel_h * kernel_w;
151
+ const int c_col = c_im * kernel_h * kernel_w;
152
+
153
+ // compute deformable group index
154
+ const int deformable_group_index = c_im / channel_per_deformable_group;
155
+
156
+ const int h_in = h_col * stride_h - pad_h;
157
+ const int w_in = w_col * stride_w - pad_w;
158
+
159
+ // float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
160
+ float *data_col_ptr = data_col + ((b_col * num_channels * kernel_w * kernel_h + c_col) * height_col + h_col) * width_col + w_col;
161
+ //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
162
+ const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
163
+ const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
164
+
165
+ const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
166
+
167
+ for (int i = 0; i < kernel_h; ++i)
168
+ {
169
+ for (int j = 0; j < kernel_w; ++j)
170
+ {
171
+ const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
172
+ const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
173
+ const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
174
+ const float offset_h = data_offset_ptr[data_offset_h_ptr];
175
+ const float offset_w = data_offset_ptr[data_offset_w_ptr];
176
+ const float mask = data_mask_ptr[data_mask_hw_ptr];
177
+ float val = static_cast<float>(0);
178
+ const float h_im = h_in + i * dilation_h + offset_h;
179
+ const float w_im = w_in + j * dilation_w + offset_w;
180
+ //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
181
+ if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
182
+ {
183
+ //const float map_h = i * dilation_h + offset_h;
184
+ //const float map_w = j * dilation_w + offset_w;
185
+ //const int cur_height = height - h_in;
186
+ //const int cur_width = width - w_in;
187
+ //val = dmcn_im2col_bilinear_cpu(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
188
+ val = dmcn_im2col_bilinear_cpu(data_im_ptr, width, height, width, h_im, w_im);
189
+ }
190
+ *data_col_ptr = val * mask;
191
+ // data_col_ptr += batch_size * height_col * width_col;
192
+ data_col_ptr += height_col * width_col;
193
+ }
194
+ }
195
+ }
196
+ }
197
+
198
+ void modulated_deformable_col2im_cpu_kernel(const int n, const float *data_col, const float *data_offset, const float *data_mask,
199
+ const int channels, const int height, const int width,
200
+ const int kernel_h, const int kernel_w,
201
+ const int pad_h, const int pad_w,
202
+ const int stride_h, const int stride_w,
203
+ const int dilation_h, const int dilation_w,
204
+ const int channel_per_deformable_group,
205
+ const int batch_size, const int deformable_group,
206
+ const int height_col, const int width_col,
207
+ float *grad_im)
208
+ {
209
+ for(int index = 0; index < n; index++)
210
+ {
211
+ const int j = (index / width_col / height_col / batch_size) % kernel_w;
212
+ const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
213
+ const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
214
+ // compute the start and end of the output
215
+
216
+ const int deformable_group_index = c / channel_per_deformable_group;
217
+
218
+ int w_out = index % width_col;
219
+ int h_out = (index / width_col) % height_col;
220
+ int b = (index / width_col / height_col) % batch_size;
221
+ int w_in = w_out * stride_w - pad_w;
222
+ int h_in = h_out * stride_h - pad_h;
223
+
224
+ const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
225
+ const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
226
+ const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
227
+ const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
228
+ const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
229
+ const float offset_h = data_offset_ptr[data_offset_h_ptr];
230
+ const float offset_w = data_offset_ptr[data_offset_w_ptr];
231
+ const float mask = data_mask_ptr[data_mask_hw_ptr];
232
+ const float cur_inv_h_data = h_in + i * dilation_h + offset_h;
233
+ const float cur_inv_w_data = w_in + j * dilation_w + offset_w;
234
+
235
+ const float cur_top_grad = data_col[index] * mask;
236
+ const int cur_h = (int)cur_inv_h_data;
237
+ const int cur_w = (int)cur_inv_w_data;
238
+
239
+ for (int dy = -2; dy <= 2; dy++)
240
+ {
241
+ for (int dx = -2; dx <= 2; dx++)
242
+ {
243
+ if (cur_h + dy >= 0 && cur_h + dy < height &&
244
+ cur_w + dx >= 0 && cur_w + dx < width &&
245
+ abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
246
+ abs(cur_inv_w_data - (cur_w + dx)) < 1)
247
+ {
248
+ int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
249
+ float weight = dmcn_get_gradient_weight_cpu(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
250
+ //atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
251
+ *(grad_im + cur_bottom_grad_pos) += weight * cur_top_grad;
252
+
253
+ }
254
+ }
255
+ }
256
+ }
257
+ }
258
+
259
+ void modulated_deformable_col2im_coord_cpu_kernel(const int n, const float *data_col, const float *data_im,
260
+ const float *data_offset, const float *data_mask,
261
+ const int channels, const int height, const int width,
262
+ const int kernel_h, const int kernel_w,
263
+ const int pad_h, const int pad_w,
264
+ const int stride_h, const int stride_w,
265
+ const int dilation_h, const int dilation_w,
266
+ const int channel_per_deformable_group,
267
+ const int batch_size, const int offset_channels, const int deformable_group,
268
+ const int height_col, const int width_col,
269
+ float *grad_offset, float *grad_mask)
270
+ {
271
+ for(int index = 0; index < n; index++)
272
+ {
273
+ float val = 0, mval = 0;
274
+ int w = index % width_col;
275
+ int h = (index / width_col) % height_col;
276
+ int c = (index / width_col / height_col) % offset_channels;
277
+ int b = (index / width_col / height_col) / offset_channels;
278
+ // compute the start and end of the output
279
+
280
+ const int deformable_group_index = c / (2 * kernel_h * kernel_w);
281
+ const int col_step = kernel_h * kernel_w;
282
+ int cnt = 0;
283
+ const float *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
284
+ const float *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
285
+ const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
286
+ const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
287
+
288
+ const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
289
+
290
+ for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
291
+ {
292
+ const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
293
+ const int bp_dir = offset_c % 2;
294
+
295
+ int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
296
+ int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
297
+ int w_out = col_pos % width_col;
298
+ int h_out = (col_pos / width_col) % height_col;
299
+ int w_in = w_out * stride_w - pad_w;
300
+ int h_in = h_out * stride_h - pad_h;
301
+ const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
302
+ const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
303
+ const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
304
+ const float offset_h = data_offset_ptr[data_offset_h_ptr];
305
+ const float offset_w = data_offset_ptr[data_offset_w_ptr];
306
+ const float mask = data_mask_ptr[data_mask_hw_ptr];
307
+ float inv_h = h_in + i * dilation_h + offset_h;
308
+ float inv_w = w_in + j * dilation_w + offset_w;
309
+ if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
310
+ {
311
+ inv_h = inv_w = -2;
312
+ }
313
+ else
314
+ {
315
+ mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear_cpu(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
316
+ }
317
+ const float weight = dmcn_get_coordinate_weight_cpu(
318
+ inv_h, inv_w,
319
+ height, width, data_im_ptr + cnt * height * width, width, bp_dir);
320
+ val += weight * data_col_ptr[col_pos] * mask;
321
+ cnt += 1;
322
+ }
323
+ // KERNEL_ASSIGN(grad_offset[index], offset_req, val);
324
+ grad_offset[index] = val;
325
+ if (offset_c % 2 == 0)
326
+ // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
327
+ grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
328
+ }
329
+ }
330
+
331
+ void modulated_deformable_im2col_cpu(const float* data_im, const float* data_offset, const float* data_mask,
332
+ const int batch_size, const int channels, const int height_im, const int width_im,
333
+ const int height_col, const int width_col, const int kernel_h, const int kernel_w,
334
+ const int pad_h, const int pad_w, const int stride_h, const int stride_w,
335
+ const int dilation_h, const int dilation_w,
336
+ const int deformable_group, float* data_col) {
337
+ // num_axes should be smaller than block size
338
+ const int channel_per_deformable_group = channels / deformable_group;
339
+ const int num_kernels = channels * batch_size * height_col * width_col;
340
+ modulated_deformable_im2col_cpu_kernel(
341
+ num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kernel_w,
342
+ pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
343
+ batch_size, channels, deformable_group, height_col, width_col, data_col);
344
+
345
+ /*cudaError_t err = cudaGetLastError();
346
+ if (err != cudaSuccess)
347
+ {
348
+ printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
349
+ }*/
350
+
351
+ }
352
+
353
+ void modulated_deformable_col2im_cpu(const float* data_col, const float* data_offset, const float* data_mask,
354
+ const int batch_size, const int channels, const int height_im, const int width_im,
355
+ const int height_col, const int width_col, const int kernel_h, const int kernel_w,
356
+ const int pad_h, const int pad_w, const int stride_h, const int stride_w,
357
+ const int dilation_h, const int dilation_w,
358
+ const int deformable_group, float* grad_im){
359
+
360
+ const int channel_per_deformable_group = channels / deformable_group;
361
+ const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
362
+ modulated_deformable_col2im_cpu_kernel(
363
+ num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im,
364
+ kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
365
+ dilation_h, dilation_w, channel_per_deformable_group,
366
+ batch_size, deformable_group, height_col, width_col, grad_im);
367
+ /*cudaError_t err = cudaGetLastError();
368
+ if (err != cudaSuccess)
369
+ {
370
+ printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
371
+ }*/
372
+
373
+ }
374
+
375
+ void modulated_deformable_col2im_coord_cpu(const float* data_col, const float* data_im, const float* data_offset, const float* data_mask,
376
+ const int batch_size, const int channels, const int height_im, const int width_im,
377
+ const int height_col, const int width_col, const int kernel_h, const int kernel_w,
378
+ const int pad_h, const int pad_w, const int stride_h, const int stride_w,
379
+ const int dilation_h, const int dilation_w,
380
+ const int deformable_group,
381
+ float* grad_offset, float* grad_mask) {
382
+ const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
383
+ const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
384
+ modulated_deformable_col2im_coord_cpu_kernel(
385
+ num_kernels, data_col, data_im, data_offset, data_mask, channels, height_im, width_im,
386
+ kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
387
+ dilation_h, dilation_w, channel_per_deformable_group,
388
+ batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
389
+ grad_offset, grad_mask);
390
+ /*cudaError_t err = cudaGetLastError();
391
+ if (err != cudaSuccess)
392
+ {
393
+ printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
394
+ }*/
395
+ }
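
A small, self-contained check (illustrative only, not part of the commit) of the bilinear sampling helper defined above, dmcn_im2col_bilinear_cpu: sampling the centre of a 2x2 patch should return the mean of its four corners.

// Assumes this harness is compiled and linked against dcn_v2_im2col_cpu.cpp.
#include <cstdio>

float dmcn_im2col_bilinear_cpu(const float *bottom_data, int data_width,
                               int height, int width, float h, float w);

int main() {
    const float patch[4] = {0.f, 1.f, 2.f, 3.f};   // row-major 2x2: [[0, 1], [2, 3]]
    float v = dmcn_im2col_bilinear_cpu(patch, /*data_width=*/2,
                                       /*height=*/2, /*width=*/2,
                                       /*h=*/0.5f, /*w=*/0.5f);
    std::printf("%f\n", v);                        // expected 1.5 = (0 + 1 + 2 + 3) / 4
    return 0;
}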
maskrcnn_benchmark/csrc/cpu/dcn_v2_im2col_cpu.h ADDED
@@ -0,0 +1,99 @@
1
+
2
+ /*!
3
+ ******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
4
+ *
5
+ * COPYRIGHT
6
+ *
7
+ * All contributions by the University of California:
8
+ * Copyright (c) 2014-2017 The Regents of the University of California (Regents)
9
+ * All rights reserved.
10
+ *
11
+ * All other contributions:
12
+ * Copyright (c) 2014-2017, the respective contributors
13
+ * All rights reserved.
14
+ *
15
+ * Caffe uses a shared copyright model: each contributor holds copyright over
16
+ * their contributions to Caffe. The project versioning records all such
17
+ * contribution and copyright details. If a contributor wants to further mark
18
+ * their specific copyright on a particular contribution, they should indicate
19
+ * their copyright solely in the commit message of the change when it is
20
+ * committed.
21
+ *
22
+ * LICENSE
23
+ *
24
+ * Redistribution and use in source and binary forms, with or without
25
+ * modification, are permitted provided that the following conditions are met:
26
+ *
27
+ * 1. Redistributions of source code must retain the above copyright notice, this
28
+ * list of conditions and the following disclaimer.
29
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
30
+ * this list of conditions and the following disclaimer in the documentation
31
+ * and/or other materials provided with the distribution.
32
+ *
33
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
34
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
35
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
36
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
37
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
38
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
40
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
42
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43
+ *
44
+ * CONTRIBUTION AGREEMENT
45
+ *
46
+ * By contributing to the BVLC/caffe repository through pull-request, comment,
47
+ * or otherwise, the contributor releases their content to the
48
+ * license and copyright terms herein.
49
+ *
50
+ ***************** END Caffe Copyright Notice and Disclaimer ********************
51
+ *
52
+ * Copyright (c) 2018 Microsoft
53
+ * Licensed under The MIT License [see LICENSE for details]
54
+ * \file modulated_deformable_im2col.h
55
+ * \brief Function definitions of converting an image to
56
+ * column matrix based on kernel, padding, dilation, and offset.
57
+ * These functions are mainly used in deformable convolution operators.
58
+ * \ref: https://arxiv.org/abs/1811.11168
59
+ * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu
60
+ */
61
+
62
+ /***************** Adapted by Charles Shang *********************/
63
+ // modified from the CUDA version for CPU use by Daniel K. Suhendro
64
+
65
+ #ifndef DCN_V2_IM2COL_CPU
66
+ #define DCN_V2_IM2COL_CPU
67
+
68
+ #ifdef __cplusplus
69
+ extern "C"
70
+ {
71
+ #endif
72
+
73
+ void modulated_deformable_im2col_cpu(const float *data_im, const float *data_offset, const float *data_mask,
74
+ const int batch_size, const int channels, const int height_im, const int width_im,
75
+ const int height_col, const int width_col, const int kernel_h, const int kernel_w,
76
+ const int pad_h, const int pad_w, const int stride_h, const int stride_w,
77
+ const int dilation_h, const int dilation_w,
78
+ const int deformable_group, float *data_col);
79
+
80
+ void modulated_deformable_col2im_cpu(const float *data_col, const float *data_offset, const float *data_mask,
81
+ const int batch_size, const int channels, const int height_im, const int width_im,
82
+ const int height_col, const int width_col, const int kernel_h, const int kernel_w,
83
+ const int pad_h, const int pad_w, const int stride_h, const int stride_w,
84
+ const int dilation_h, const int dilation_w,
85
+ const int deformable_group, float *grad_im);
86
+
87
+ void modulated_deformable_col2im_coord_cpu(const float *data_col, const float *data_im, const float *data_offset, const float *data_mask,
88
+ const int batch_size, const int channels, const int height_im, const int width_im,
89
+ const int height_col, const int width_col, const int kernel_h, const int kernel_w,
90
+ const int pad_h, const int pad_w, const int stride_h, const int stride_w,
91
+ const int dilation_h, const int dilation_w,
92
+ const int deformable_group,
93
+ float *grad_offset, float *grad_mask);
94
+
95
+ #ifdef __cplusplus
96
+ }
97
+ #endif
98
+
99
+ #endif
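
The height_col/width_col arguments in the declarations above are the convolution output resolution; a hypothetical helper (not part of the commit) matching the formula used in dcn_v2_cpu.cpp:

// Illustrative only: the output-size convention the im2col/col2im routines expect.
inline int conv_out_size(int in, int pad, int dilation, int kernel, int stride) {
    return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}
// e.g. conv_out_size(8, /*pad=*/1, /*dilation=*/1, /*kernel=*/3, /*stride=*/1) == 8
//      conv_out_size(7, /*pad=*/0, /*dilation=*/1, /*kernel=*/3, /*stride=*/2) == 3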
maskrcnn_benchmark/csrc/cpu/dcn_v2_psroi_pooling_cpu.cpp ADDED
@@ -0,0 +1,426 @@
1
+ /*!
2
+ * Copyright (c) 2017 Microsoft
3
+ * Licensed under The MIT License [see LICENSE for details]
4
+ * \file deformable_psroi_pooling.cu
5
+ * \brief
6
+ * \author Yi Li, Guodong Zhang, Jifeng Dai
7
+ */
8
+ /***************** Adapted by Charles Shang *********************/
9
+ // modified from the CUDA version for CPU use by Daniel K. Suhendro
10
+
11
+ #include <cstdio>
12
+ #include <algorithm>
13
+ #include <cstring>
14
+
15
+ #include <ATen/ATen.h>
16
+ //#include <ATen/cuda/CUDAContext.h>
17
+
18
+ #include <TH/TH.h>
19
+ //#include <THC/THCAtomics.cuh>
20
+ //#include <THC/THCDeviceUtils.cuh>
21
+
22
+ /*#define CUDA_KERNEL_LOOP(i, n) \
23
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
24
+ i < (n); \
25
+ i += blockDim.x * gridDim.x)
26
+
27
+ const int CUDA_NUM_THREADS = 1024;
28
+ inline int GET_BLOCKS(const int N)
29
+ {
30
+ return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
31
+ }*/
32
+
33
+ template <typename T>
34
+ T bilinear_interp_cpu(
35
+ const T *data,
36
+ const T x,
37
+ const T y,
38
+ const int width,
39
+ const int height)
40
+ {
41
+ int x1 = floor(x);
42
+ int x2 = ceil(x);
43
+ int y1 = floor(y);
44
+ int y2 = ceil(y);
45
+ T dist_x = static_cast<T>(x - x1);
46
+ T dist_y = static_cast<T>(y - y1);
47
+ T value11 = data[y1 * width + x1];
48
+ T value12 = data[y2 * width + x1];
49
+ T value21 = data[y1 * width + x2];
50
+ T value22 = data[y2 * width + x2];
51
+ T value = (1 - dist_x) * (1 - dist_y) * value11 +
52
+ (1 - dist_x) * dist_y * value12 +
53
+ dist_x * (1 - dist_y) * value21 +
54
+ dist_x * dist_y * value22;
55
+ return value;
56
+ }
57
+
58
+ template <typename T>
59
+ void DeformablePSROIPoolForwardKernelCpu(
60
+ const int count,
61
+ const T *bottom_data,
62
+ const T spatial_scale,
63
+ const int channels,
64
+ const int height, const int width,
65
+ const int pooled_height, const int pooled_width,
66
+ const T *bottom_rois, const T *bottom_trans,
67
+ const int no_trans,
68
+ const T trans_std,
69
+ const int sample_per_part,
70
+ const int output_dim,
71
+ const int group_size,
72
+ const int part_size,
73
+ const int num_classes,
74
+ const int channels_each_class,
75
+ T *top_data,
76
+ T *top_count)
77
+ {
78
+ for(int index = 0; index < count; index++)
79
+ {
80
+ // The output is in order (n, ctop, ph, pw)
81
+ int pw = index % pooled_width;
82
+ int ph = (index / pooled_width) % pooled_height;
83
+ int ctop = (index / pooled_width / pooled_height) % output_dim;
84
+ int n = index / pooled_width / pooled_height / output_dim;
85
+
86
+ // [start, end) interval for spatial sampling
87
+ const T *offset_bottom_rois = bottom_rois + n * 5;
88
+ int roi_batch_ind = offset_bottom_rois[0];
89
+ T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
90
+ T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
91
+ T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
92
+ T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
93
+
94
+ // Force too small ROIs to be 1x1
95
+ T roi_width = std::max(roi_end_w - roi_start_w, T(0.1)); //avoid 0
96
+ T roi_height = std::max(roi_end_h - roi_start_h, T(0.1));
97
+
98
+ // Compute w and h at bottom
99
+ T bin_size_h = roi_height / static_cast<T>(pooled_height);
100
+ T bin_size_w = roi_width / static_cast<T>(pooled_width);
101
+
102
+ T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
103
+ T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
104
+
105
+ int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
106
+ int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
107
+ int class_id = ctop / channels_each_class;
108
+ T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std;
109
+ T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std;
110
+
111
+ T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
112
+ wstart += trans_x * roi_width;
113
+ T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
114
+ hstart += trans_y * roi_height;
115
+
116
+ T sum = 0;
117
+ int count = 0;
118
+ int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
119
+ int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
120
+ gw = std::min(std::max(gw, 0), group_size - 1);
121
+ gh = std::min(std::max(gh, 0), group_size - 1);
122
+
123
+ const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
124
+ for (int ih = 0; ih < sample_per_part; ih++)
125
+ {
126
+ for (int iw = 0; iw < sample_per_part; iw++)
127
+ {
128
+ T w = wstart + iw * sub_bin_size_w;
129
+ T h = hstart + ih * sub_bin_size_h;
130
+ // bilinear interpolation
131
+ if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
132
+ {
133
+ continue;
134
+ }
135
+ w = std::min(std::max(w, T(0.)), width - T(1.));
136
+ h = std::min(std::max(h, T(0.)), height - T(1.));
137
+ int c = (ctop * group_size + gh) * group_size + gw;
138
+ T val = bilinear_interp_cpu(offset_bottom_data + c * height * width, w, h, width, height);
139
+ sum += val;
140
+ count++;
141
+ }
142
+ }
143
+ top_data[index] = count == 0 ? static_cast<T>(0) : sum / count;
144
+ top_count[index] = count;
145
+ }
146
+ }
147
+
148
+ template <typename T>
149
+ void DeformablePSROIPoolBackwardAccKernelCpu(
150
+ const int count,
151
+ const T *top_diff,
152
+ const T *top_count,
153
+ const int num_rois,
154
+ const T spatial_scale,
155
+ const int channels,
156
+ const int height, const int width,
157
+ const int pooled_height, const int pooled_width,
158
+ const int output_dim,
159
+ T *bottom_data_diff, T *bottom_trans_diff,
160
+ const T *bottom_data,
161
+ const T *bottom_rois,
162
+ const T *bottom_trans,
163
+ const int no_trans,
164
+ const T trans_std,
165
+ const int sample_per_part,
166
+ const int group_size,
167
+ const int part_size,
168
+ const int num_classes,
169
+ const int channels_each_class)
170
+ {
171
+ for(int index = 0; index < count; index++)
172
+ {
173
+ // The output is in order (n, ctop, ph, pw)
174
+ int pw = index % pooled_width;
175
+ int ph = (index / pooled_width) % pooled_height;
176
+ int ctop = (index / pooled_width / pooled_height) % output_dim;
177
+ int n = index / pooled_width / pooled_height / output_dim;
178
+
179
+ // [start, end) interval for spatial sampling
180
+ const T *offset_bottom_rois = bottom_rois + n * 5;
181
+ int roi_batch_ind = offset_bottom_rois[0];
182
+ T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
183
+ T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
184
+ T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
185
+ T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
186
+
187
+ // Force too small ROIs to be 1x1
188
+ T roi_width = std::max(roi_end_w - roi_start_w, T(0.1)); //avoid 0
189
+ T roi_height = std::max(roi_end_h - roi_start_h, T(0.1));
190
+
191
+ // Compute w and h at bottom
192
+ T bin_size_h = roi_height / static_cast<T>(pooled_height);
193
+ T bin_size_w = roi_width / static_cast<T>(pooled_width);
194
+
195
+ T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
196
+ T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
197
+
198
+ int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
199
+ int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
200
+ int class_id = ctop / channels_each_class;
201
+ T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std;
202
+ T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std;
203
+
204
+ T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
205
+ wstart += trans_x * roi_width;
206
+ T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
207
+ hstart += trans_y * roi_height;
208
+
209
+ if (top_count[index] <= 0)
210
+ {
211
+ continue;
212
+ }
213
+ T diff_val = top_diff[index] / top_count[index];
214
+ const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
215
+ T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
216
+ int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
217
+ int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
218
+ gw = std::min(std::max(gw, 0), group_size - 1);
219
+ gh = std::min(std::max(gh, 0), group_size - 1);
220
+
221
+ for (int ih = 0; ih < sample_per_part; ih++)
222
+ {
223
+ for (int iw = 0; iw < sample_per_part; iw++)
224
+ {
225
+ T w = wstart + iw * sub_bin_size_w;
226
+ T h = hstart + ih * sub_bin_size_h;
227
+ // bilinear interpolation
228
+ if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
229
+ {
230
+ continue;
231
+ }
232
+ w = std::min(std::max(w, T(0.)), width - T(1.));
233
+ h = std::min(std::max(h, T(0.)), height - T(1.));
234
+ int c = (ctop * group_size + gh) * group_size + gw;
235
+ // backward on feature
236
+ int x0 = floor(w);
237
+ int x1 = ceil(w);
238
+ int y0 = floor(h);
239
+ int y1 = ceil(h);
240
+ T dist_x = w - x0, dist_y = h - y0;
241
+ T q00 = (1 - dist_x) * (1 - dist_y);
242
+ T q01 = (1 - dist_x) * dist_y;
243
+ T q10 = dist_x * (1 - dist_y);
244
+ T q11 = dist_x * dist_y;
245
+ int bottom_index_base = c * height * width;
246
+ /*atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
247
+ atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
248
+ atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
249
+ atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);*/
250
+ *(offset_bottom_data_diff + bottom_index_base + y0 * width + x0) += q00 * diff_val;
251
+ *(offset_bottom_data_diff + bottom_index_base + y1 * width + x0) += q01 * diff_val;
252
+ *(offset_bottom_data_diff + bottom_index_base + y0 * width + x1) += q10 * diff_val;
253
+ *(offset_bottom_data_diff + bottom_index_base + y1 * width + x1) += q11 * diff_val;
254
+
255
+
256
+ if (no_trans)
257
+ {
258
+ continue;
259
+ }
260
+ T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
261
+ T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
262
+ T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
263
+ T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
264
+ T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;
265
+ diff_x *= roi_width;
266
+ T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;
267
+ diff_y *= roi_height;
268
+
269
+ /*atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x);
270
+ atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y);*/
271
+ *(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w) += diff_x;
272
+ *(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w) += diff_y;
273
+ }
274
+ }
275
+ }
276
+ }
277
+
278
+ std::tuple<at::Tensor, at::Tensor>
279
+ dcn_v2_psroi_pooling_cpu_forward(const at::Tensor &input,
280
+ const at::Tensor &bbox,
281
+ const at::Tensor &trans,
282
+ const int no_trans,
283
+ const float spatial_scale,
284
+ const int output_dim,
285
+ const int group_size,
286
+ const int pooled_size,
287
+ const int part_size,
288
+ const int sample_per_part,
289
+ const float trans_std)
290
+ {
291
+ /*AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
292
+ AT_ASSERTM(bbox.is_cuda(), "rois must be a CUDA tensor");
293
+ AT_ASSERTM(trans.is_cuda(), "trans must be a CUDA tensor");*/
294
+
295
+ // const int batch = input.size(0);
296
+ const int channels = input.size(1);
297
+ const int height = input.size(2);
298
+ const int width = input.size(3);
299
+ const int channels_trans = no_trans ? 2 : trans.size(1);
300
+ const int num_bbox = bbox.size(0);
301
+
302
+ AT_ASSERTM(channels == output_dim, "input channels and output channels must equal");
303
+ auto pooled_height = pooled_size;
304
+ auto pooled_width = pooled_size;
305
+
306
+ auto out = at::empty({num_bbox, output_dim, pooled_height, pooled_width}, input.options());
307
+ long out_size = num_bbox * output_dim * pooled_height * pooled_width;
308
+ auto top_count = at::zeros({num_bbox, output_dim, pooled_height, pooled_width}, input.options());
309
+
310
+ const int num_classes = no_trans ? 1 : channels_trans / 2;
311
+ const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
312
+
313
+ //cudaStream_t stream = at::cuda::getCurrentCUDAStream();
314
+
315
+ if (out.numel() == 0)
316
+ {
317
+ //THCudaCheck(cudaGetLastError());
318
+ return std::make_tuple(out, top_count);
319
+ }
320
+
321
+ /*dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L));
322
+ dim3 block(512);*/
323
+
324
+ AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "dcn_v2_psroi_pooling_cpu_forward", [&] {
325
+ DeformablePSROIPoolForwardKernelCpu<scalar_t>(
326
+ out_size,
327
+ input.contiguous().data_ptr<scalar_t>(),
328
+ spatial_scale,
329
+ channels,
330
+ height, width,
331
+ pooled_height,
332
+ pooled_width,
333
+ bbox.contiguous().data_ptr<scalar_t>(),
334
+ trans.contiguous().data_ptr<scalar_t>(),
335
+ no_trans,
336
+ trans_std,
337
+ sample_per_part,
338
+ output_dim,
339
+ group_size,
340
+ part_size,
341
+ num_classes,
342
+ channels_each_class,
343
+ out.data_ptr<scalar_t>(),
344
+ top_count.data_ptr<scalar_t>());
345
+ });
346
+ //THCudaCheck(cudaGetLastError());
347
+ return std::make_tuple(out, top_count);
348
+ }
349
+
350
+ std::tuple<at::Tensor, at::Tensor>
351
+ dcn_v2_psroi_pooling_cpu_backward(const at::Tensor &out_grad,
352
+ const at::Tensor &input,
353
+ const at::Tensor &bbox,
354
+ const at::Tensor &trans,
355
+ const at::Tensor &top_count,
356
+ const int no_trans,
357
+ const float spatial_scale,
358
+ const int output_dim,
359
+ const int group_size,
360
+ const int pooled_size,
361
+ const int part_size,
362
+ const int sample_per_part,
363
+ const float trans_std)
364
+ {
365
+ /*AT_ASSERTM(out_grad.is_cuda(), "out_grad must be a CUDA tensor");
366
+ AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
367
+ AT_ASSERTM(bbox.is_cuda(), "bbox must be a CUDA tensor");
368
+ AT_ASSERTM(trans.is_cuda(), "trans must be a CUDA tensor");
369
+ AT_ASSERTM(top_count.is_cuda(), "top_count must be a CUDA tensor");*/
370
+
371
+ const int batch = input.size(0);
372
+ const int channels = input.size(1);
373
+ const int height = input.size(2);
374
+ const int width = input.size(3);
375
+ const int channels_trans = no_trans ? 2 : trans.size(1);
376
+ const int num_bbox = bbox.size(0);
377
+
378
+ AT_ASSERTM(channels == output_dim, "input channels and output channels must equal");
379
+ auto pooled_height = pooled_size;
380
+ auto pooled_width = pooled_size;
381
+ long out_size = num_bbox * output_dim * pooled_height * pooled_width;
382
+ const int num_classes = no_trans ? 1 : channels_trans / 2;
383
+ const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
384
+
385
+ auto input_grad = at::zeros({batch, channels, height, width}, out_grad.options());
386
+ auto trans_grad = at::zeros_like(trans);
387
+
388
+ if (input_grad.numel() == 0)
389
+ {
390
+ //THCudaCheck(cudaGetLastError());
391
+ return std::make_tuple(input_grad, trans_grad);
392
+ }
393
+
394
+ /*dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L));
395
+ dim3 block(512);
396
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();*/
397
+
398
+ AT_DISPATCH_FLOATING_TYPES(out_grad.scalar_type(), "dcn_v2_psroi_pooling_cpu_backward", [&] {
399
+ DeformablePSROIPoolBackwardAccKernelCpu<scalar_t>(
400
+ out_size,
401
+ out_grad.contiguous().data_ptr<scalar_t>(),
402
+ top_count.contiguous().data_ptr<scalar_t>(),
403
+ num_bbox,
404
+ spatial_scale,
405
+ channels,
406
+ height,
407
+ width,
408
+ pooled_height,
409
+ pooled_width,
410
+ output_dim,
411
+ input_grad.contiguous().data_ptr<scalar_t>(),
412
+ trans_grad.contiguous().data_ptr<scalar_t>(),
413
+ input.contiguous().data_ptr<scalar_t>(),
414
+ bbox.contiguous().data_ptr<scalar_t>(),
415
+ trans.contiguous().data_ptr<scalar_t>(),
416
+ no_trans,
417
+ trans_std,
418
+ sample_per_part,
419
+ group_size,
420
+ part_size,
421
+ num_classes,
422
+ channels_each_class);
423
+ });
424
+ //THCudaCheck(cudaGetLastError());
425
+ return std::make_tuple(input_grad, trans_grad);
426
+ }
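
Finally, a hypothetical driver for the pooling forward defined above (again not part of the commit): each row of bbox is (batch_index, x1, y1, x2, y2), read by the kernel through offset_bottom_rois, and the input channel count must equal output_dim per the AT_ASSERTM; group_size is kept at 1 here so the position-sensitive channel index stays within that channel count. All shapes and parameter values below are illustrative.

// Illustrative usage sketch, assuming the file above is linked in and ATen is available.
#include <ATen/ATen.h>
#include <iostream>
#include <tuple>

std::tuple<at::Tensor, at::Tensor>
dcn_v2_psroi_pooling_cpu_forward(const at::Tensor &input, const at::Tensor &bbox,
                                 const at::Tensor &trans, int no_trans, float spatial_scale,
                                 int output_dim, int group_size, int pooled_size,
                                 int part_size, int sample_per_part, float trans_std);

int main() {
    auto feat = at::randn({1, 8, 16, 16});               // N, C, H, W (C == output_dim)
    float roi[5] = {0.f, 0.f, 0.f, 31.f, 31.f};          // batch 0, full 32x32 image box
    auto bbox = at::from_blob(roi, {1, 5}).clone();      // copy so the tensor owns its memory
    auto trans = at::zeros({1, 2, 4, 4});                // ignored when no_trans = 1
    auto result = dcn_v2_psroi_pooling_cpu_forward(
        feat, bbox, trans,
        /*no_trans=*/1, /*spatial_scale=*/0.5f, /*output_dim=*/8,
        /*group_size=*/1, /*pooled_size=*/4, /*part_size=*/4,
        /*sample_per_part=*/2, /*trans_std=*/0.1f);
    auto out = std::get<0>(result);                      // pooled features: [1, 8, 4, 4]
    auto top_count = std::get<1>(result);                // number of samples averaged per bin
    std::cout << out.sizes() << " " << top_count.sizes() << std::endl;
    return 0;
}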