Cyril666 committed
Commit 7ef681d · 1 Parent(s): 0404923

First model version

maskrcnn_benchmark/csrc/cpu/dcn_v2_psroi_pooling_cpu.cpp ADDED
@@ -0,0 +1,426 @@
+ /*!
+  * Copyright (c) 2017 Microsoft
+  * Licensed under The MIT License [see LICENSE for details]
+  * \file deformable_psroi_pooling.cu
+  * \brief
+  * \author Yi Li, Guodong Zhang, Jifeng Dai
+  */
+ /***************** Adapted by Charles Shang *********************/
+ // modified from the CUDA version for CPU use by Daniel K. Suhendro
+
+ #include <cstdio>
+ #include <algorithm>
+ #include <cstring>
+ #include <cmath> // floor, ceil, round used below
+
+ #include <ATen/ATen.h>
+ //#include <ATen/cuda/CUDAContext.h>
+
+ #include <TH/TH.h>
+ //#include <THC/THCAtomics.cuh>
+ //#include <THC/THCDeviceUtils.cuh>
+
+ /*#define CUDA_KERNEL_LOOP(i, n)                        \
+   for (int i = blockIdx.x * blockDim.x + threadIdx.x;  \
+        i < (n);                                        \
+        i += blockDim.x * gridDim.x)
+
+ const int CUDA_NUM_THREADS = 1024;
+ inline int GET_BLOCKS(const int N)
+ {
+   return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
+ }*/
+
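+ // Plain bilinear interpolation at the (possibly fractional) location (x, y)
+ // of a single (height x width) channel plane pointed to by `data`.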
+ template <typename T>
+ T bilinear_interp_cpu(
+     const T *data,
+     const T x,
+     const T y,
+     const int width,
+     const int height)
+ {
+   int x1 = floor(x);
+   int x2 = ceil(x);
+   int y1 = floor(y);
+   int y2 = ceil(y);
+   T dist_x = static_cast<T>(x - x1);
+   T dist_y = static_cast<T>(y - y1);
+   T value11 = data[y1 * width + x1];
+   T value12 = data[y2 * width + x1];
+   T value21 = data[y1 * width + x2];
+   T value22 = data[y2 * width + x2];
+   T value = (1 - dist_x) * (1 - dist_y) * value11 +
+             (1 - dist_x) * dist_y * value12 +
+             dist_x * (1 - dist_y) * value21 +
+             dist_x * dist_y * value22;
+   return value;
+ }
+
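+ // Forward pass over all output elements. Each index decodes to one output
+ // bin (n, ctop, ph, pw); the bin averages sample_per_part^2 bilinear samples
+ // taken inside the (optionally offset-shifted) ROI sub-window.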
+ template <typename T>
+ void DeformablePSROIPoolForwardKernelCpu(
+     const int count,
+     const T *bottom_data,
+     const T spatial_scale,
+     const int channels,
+     const int height, const int width,
+     const int pooled_height, const int pooled_width,
+     const T *bottom_rois, const T *bottom_trans,
+     const int no_trans,
+     const T trans_std,
+     const int sample_per_part,
+     const int output_dim,
+     const int group_size,
+     const int part_size,
+     const int num_classes,
+     const int channels_each_class,
+     T *top_data,
+     T *top_count)
+ {
+   for (int index = 0; index < count; index++)
+   {
+     // The output is in order (n, ctop, ph, pw)
+     int pw = index % pooled_width;
+     int ph = (index / pooled_width) % pooled_height;
+     int ctop = (index / pooled_width / pooled_height) % output_dim;
+     int n = index / pooled_width / pooled_height / output_dim;
+
+     // [start, end) interval for spatial sampling
+     const T *offset_bottom_rois = bottom_rois + n * 5;
+     int roi_batch_ind = offset_bottom_rois[0];
+     T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
+     T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
+     T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
+     T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
+
+     // Force too small ROIs to be 1x1
+     T roi_width = std::max(roi_end_w - roi_start_w, T(0.1)); // avoid 0
+     T roi_height = std::max(roi_end_h - roi_start_h, T(0.1));
+
+     // Compute w and h at bottom
+     T bin_size_h = roi_height / static_cast<T>(pooled_height);
+     T bin_size_w = roi_width / static_cast<T>(pooled_width);
+
+     T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
+     T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
+
+     int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
+     int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
+     int class_id = ctop / channels_each_class;
+     T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std;
+     T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std;
+
+     T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
+     wstart += trans_x * roi_width;
+     T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
+     hstart += trans_y * roi_height;
+
+     T sum = 0;
+     int sample_count = 0; // renamed from `count` to avoid shadowing the loop bound
+     int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
+     int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
+     gw = std::min(std::max(gw, 0), group_size - 1);
+     gh = std::min(std::max(gh, 0), group_size - 1);
+
+     const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
+     for (int ih = 0; ih < sample_per_part; ih++)
+     {
+       for (int iw = 0; iw < sample_per_part; iw++)
+       {
+         T w = wstart + iw * sub_bin_size_w;
+         T h = hstart + ih * sub_bin_size_h;
+         // bilinear interpolation
+         if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
+         {
+           continue;
+         }
+         w = std::min(std::max(w, T(0.)), width - T(1.));
+         h = std::min(std::max(h, T(0.)), height - T(1.));
+         int c = (ctop * group_size + gh) * group_size + gw;
+         T val = bilinear_interp_cpu(offset_bottom_data + c * height * width, w, h, width, height);
+         sum += val;
+         sample_count++;
+       }
+     }
+     top_data[index] = sample_count == 0 ? static_cast<T>(0) : sum / sample_count;
+     top_count[index] = sample_count;
+   }
+ }
+
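+ // Backward pass: distributes each output-bin gradient back onto the four
+ // bilinear taps of every sample, and (when offsets are enabled) accumulates
+ // the gradient w.r.t. the per-part (x, y) offsets into bottom_trans_diff.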
+ template <typename T>
+ void DeformablePSROIPoolBackwardAccKernelCpu(
+     const int count,
+     const T *top_diff,
+     const T *top_count,
+     const int num_rois,
+     const T spatial_scale,
+     const int channels,
+     const int height, const int width,
+     const int pooled_height, const int pooled_width,
+     const int output_dim,
+     T *bottom_data_diff, T *bottom_trans_diff,
+     const T *bottom_data,
+     const T *bottom_rois,
+     const T *bottom_trans,
+     const int no_trans,
+     const T trans_std,
+     const int sample_per_part,
+     const int group_size,
+     const int part_size,
+     const int num_classes,
+     const int channels_each_class)
+ {
+   for (int index = 0; index < count; index++)
+   {
+     // The output is in order (n, ctop, ph, pw)
+     int pw = index % pooled_width;
+     int ph = (index / pooled_width) % pooled_height;
+     int ctop = (index / pooled_width / pooled_height) % output_dim;
+     int n = index / pooled_width / pooled_height / output_dim;
+
+     // [start, end) interval for spatial sampling
+     const T *offset_bottom_rois = bottom_rois + n * 5;
+     int roi_batch_ind = offset_bottom_rois[0];
+     T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
+     T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
+     T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
+     T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
+
+     // Force too small ROIs to be 1x1
+     T roi_width = std::max(roi_end_w - roi_start_w, T(0.1)); // avoid 0
+     T roi_height = std::max(roi_end_h - roi_start_h, T(0.1));
+
+     // Compute w and h at bottom
+     T bin_size_h = roi_height / static_cast<T>(pooled_height);
+     T bin_size_w = roi_width / static_cast<T>(pooled_width);
+
+     T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
+     T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
+
+     int part_h = floor(static_cast<T>(ph) / pooled_height * part_size);
+     int part_w = floor(static_cast<T>(pw) / pooled_width * part_size);
+     int class_id = ctop / channels_each_class;
+     T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std;
+     T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std;
+
+     T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
+     wstart += trans_x * roi_width;
+     T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
+     hstart += trans_y * roi_height;
+
+     if (top_count[index] <= 0)
+     {
+       continue;
+     }
+     T diff_val = top_diff[index] / top_count[index];
+     const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
+     T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
+     int gw = floor(static_cast<T>(pw) * group_size / pooled_width);
+     int gh = floor(static_cast<T>(ph) * group_size / pooled_height);
+     gw = std::min(std::max(gw, 0), group_size - 1);
+     gh = std::min(std::max(gh, 0), group_size - 1);
+
+     for (int ih = 0; ih < sample_per_part; ih++)
+     {
+       for (int iw = 0; iw < sample_per_part; iw++)
+       {
+         T w = wstart + iw * sub_bin_size_w;
+         T h = hstart + ih * sub_bin_size_h;
+         // bilinear interpolation
+         if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
+         {
+           continue;
+         }
+         w = std::min(std::max(w, T(0.)), width - T(1.));
+         h = std::min(std::max(h, T(0.)), height - T(1.));
+         int c = (ctop * group_size + gh) * group_size + gw;
+         // backward on feature
+         int x0 = floor(w);
+         int x1 = ceil(w);
+         int y0 = floor(h);
+         int y1 = ceil(h);
+         T dist_x = w - x0, dist_y = h - y0;
+         T q00 = (1 - dist_x) * (1 - dist_y);
+         T q01 = (1 - dist_x) * dist_y;
+         T q10 = dist_x * (1 - dist_y);
+         T q11 = dist_x * dist_y;
+         int bottom_index_base = c * height * width;
+         /*atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
+         atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
+         atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
+         atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);*/
+         *(offset_bottom_data_diff + bottom_index_base + y0 * width + x0) += q00 * diff_val;
+         *(offset_bottom_data_diff + bottom_index_base + y1 * width + x0) += q01 * diff_val;
+         *(offset_bottom_data_diff + bottom_index_base + y0 * width + x1) += q10 * diff_val;
+         *(offset_bottom_data_diff + bottom_index_base + y1 * width + x1) += q11 * diff_val;
+
+         if (no_trans)
+         {
+           continue;
+         }
+         T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
+         T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
+         T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
+         T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
+         T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;
+         diff_x *= roi_width;
+         T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;
+         diff_y *= roi_height;
+
+         /*atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x);
+         atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y);*/
+         *(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w) += diff_x;
+         *(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w) += diff_y;
+       }
+     }
+   }
+ }
+
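+ // CPU entry point for the forward pass. Returns (output, top_count), where
+ // top_count records how many samples landed inside each bin; the backward
+ // pass uses it to renormalize the gradient.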
+ std::tuple<at::Tensor, at::Tensor>
+ dcn_v2_psroi_pooling_cpu_forward(const at::Tensor &input,
+                                  const at::Tensor &bbox,
+                                  const at::Tensor &trans,
+                                  const int no_trans,
+                                  const float spatial_scale,
+                                  const int output_dim,
+                                  const int group_size,
+                                  const int pooled_size,
+                                  const int part_size,
+                                  const int sample_per_part,
+                                  const float trans_std)
+ {
+   /*AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+   AT_ASSERTM(bbox.is_cuda(), "rois must be a CUDA tensor");
+   AT_ASSERTM(trans.is_cuda(), "trans must be a CUDA tensor");*/
+
+   // const int batch = input.size(0);
+   const int channels = input.size(1);
+   const int height = input.size(2);
+   const int width = input.size(3);
+   const int channels_trans = no_trans ? 2 : trans.size(1);
+   const int num_bbox = bbox.size(0);
+
+   AT_ASSERTM(channels == output_dim, "input channels and output channels must be equal");
+   auto pooled_height = pooled_size;
+   auto pooled_width = pooled_size;
+
+   auto out = at::empty({num_bbox, output_dim, pooled_height, pooled_width}, input.options());
+   long out_size = num_bbox * output_dim * pooled_height * pooled_width;
+   auto top_count = at::zeros({num_bbox, output_dim, pooled_height, pooled_width}, input.options());
+
+   const int num_classes = no_trans ? 1 : channels_trans / 2;
+   const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
+
+   //cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+   if (out.numel() == 0)
+   {
+     //THCudaCheck(cudaGetLastError());
+     return std::make_tuple(out, top_count);
+   }
+
+   /*dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L));
+   dim3 block(512);*/
+
+   AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "dcn_v2_psroi_pooling_cpu_forward", [&] {
+     DeformablePSROIPoolForwardKernelCpu<scalar_t>(
+         out_size,
+         input.contiguous().data_ptr<scalar_t>(),
+         spatial_scale,
+         channels,
+         height, width,
+         pooled_height,
+         pooled_width,
+         bbox.contiguous().data_ptr<scalar_t>(),
+         trans.contiguous().data_ptr<scalar_t>(),
+         no_trans,
+         trans_std,
+         sample_per_part,
+         output_dim,
+         group_size,
+         part_size,
+         num_classes,
+         channels_each_class,
+         out.data_ptr<scalar_t>(),
+         top_count.data_ptr<scalar_t>());
+   });
+   //THCudaCheck(cudaGetLastError());
+   return std::make_tuple(out, top_count);
+ }
+
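+ // CPU entry point for the backward pass. Returns (input_grad, trans_grad);
+ // the accumulation is single-threaded here, so no atomics are needed.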
+ std::tuple<at::Tensor, at::Tensor>
+ dcn_v2_psroi_pooling_cpu_backward(const at::Tensor &out_grad,
+                                   const at::Tensor &input,
+                                   const at::Tensor &bbox,
+                                   const at::Tensor &trans,
+                                   const at::Tensor &top_count,
+                                   const int no_trans,
+                                   const float spatial_scale,
+                                   const int output_dim,
+                                   const int group_size,
+                                   const int pooled_size,
+                                   const int part_size,
+                                   const int sample_per_part,
+                                   const float trans_std)
+ {
+   /*AT_ASSERTM(out_grad.is_cuda(), "out_grad must be a CUDA tensor");
+   AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+   AT_ASSERTM(bbox.is_cuda(), "bbox must be a CUDA tensor");
+   AT_ASSERTM(trans.is_cuda(), "trans must be a CUDA tensor");
+   AT_ASSERTM(top_count.is_cuda(), "top_count must be a CUDA tensor");*/
+
+   const int batch = input.size(0);
+   const int channels = input.size(1);
+   const int height = input.size(2);
+   const int width = input.size(3);
+   const int channels_trans = no_trans ? 2 : trans.size(1);
+   const int num_bbox = bbox.size(0);
+
+   AT_ASSERTM(channels == output_dim, "input channels and output channels must be equal");
+   auto pooled_height = pooled_size;
+   auto pooled_width = pooled_size;
+   long out_size = num_bbox * output_dim * pooled_height * pooled_width;
+   const int num_classes = no_trans ? 1 : channels_trans / 2;
+   const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
+
+   auto input_grad = at::zeros({batch, channels, height, width}, out_grad.options());
+   auto trans_grad = at::zeros_like(trans);
+
+   if (input_grad.numel() == 0)
+   {
+     //THCudaCheck(cudaGetLastError());
+     return std::make_tuple(input_grad, trans_grad);
+   }
+
+   /*dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L));
+   dim3 block(512);
+   cudaStream_t stream = at::cuda::getCurrentCUDAStream();*/
+
+   AT_DISPATCH_FLOATING_TYPES(out_grad.scalar_type(), "dcn_v2_psroi_pooling_cpu_backward", [&] {
+     DeformablePSROIPoolBackwardAccKernelCpu<scalar_t>(
+         out_size,
+         out_grad.contiguous().data_ptr<scalar_t>(),
+         top_count.contiguous().data_ptr<scalar_t>(),
+         num_bbox,
+         spatial_scale,
+         channels,
+         height,
+         width,
+         pooled_height,
+         pooled_width,
+         output_dim,
+         input_grad.contiguous().data_ptr<scalar_t>(),
+         trans_grad.contiguous().data_ptr<scalar_t>(),
+         input.contiguous().data_ptr<scalar_t>(),
+         bbox.contiguous().data_ptr<scalar_t>(),
+         trans.contiguous().data_ptr<scalar_t>(),
+         no_trans,
+         trans_std,
+         sample_per_part,
+         group_size,
+         part_size,
+         num_classes,
+         channels_each_class);
+   });
+   //THCudaCheck(cudaGetLastError());
+   return std::make_tuple(input_grad, trans_grad);
+ }