Skip to content

Commit 9efd51c

Browse files
committed
Added position-sensitive ROI pool/align
1 parent 8958dd4 commit 9efd51c

File tree

10 files changed

+1461
-7
lines changed

10 files changed

+1461
-7
lines changed

test/test_ops.py

Lines changed: 401 additions & 0 deletions
Large diffs are not rendered by default.

torchvision/csrc/PSROIAlign.h

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
#pragma once

#include "cpu/vision_cpu.h"

#ifdef WITH_CUDA
#include "cuda/vision_cuda.h"
#endif

#ifdef WITH_CUDA
// CUDA implementation of the position-sensitive ROI Align forward pass
// (defined in the CUDA sources). Returns a pair of tensors; the second one
// is fed back into the backward pass as `mapping_channel`, so it presumably
// records which input channel each pooled output came from — confirm against
// the kernel implementation.
std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda(
    const at::Tensor& input,
    const at::Tensor& rois,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio);

// CUDA implementation of the position-sensitive ROI Align backward pass.
// `batch_size`, `channels`, `height`, `width` describe the shape of the
// original forward input so the gradient tensor can be reconstructed.
at::Tensor PSROIAlign_backward_cuda(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& mapping_channel,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    const int batch_size,
    const int channels,
    const int height,
    const int width);
#endif
// Device dispatcher for the position-sensitive ROI Align forward pass.
// Only a CUDA kernel exists for this op: CPU tensors, or CUDA tensors in a
// build without GPU support, raise an error instead of computing anything.
std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward(
    const at::Tensor& input,
    const at::Tensor& rois,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio) {
  // Guard clause: no CPU implementation is available.
  if (!input.type().is_cuda()) {
    AT_ERROR("Not implemented on the CPU");
  }
#ifdef WITH_CUDA
  return PSROIAlign_forward_cuda(
      input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
#else
  AT_ERROR("Not compiled with GPU support");
#endif
}
54+
55+
// Device dispatcher for the position-sensitive ROI Align backward pass.
// Mirrors PSROIAlign_forward: only a CUDA kernel exists, so non-CUDA
// gradients (or CUDA gradients in a CPU-only build) raise an error.
at::Tensor PSROIAlign_backward(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& mapping_channel,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int sampling_ratio,
    const int batch_size,
    const int channels,
    const int height,
    const int width) {
  // Guard clause: no CPU implementation is available.
  if (!grad.type().is_cuda()) {
    AT_ERROR("Not implemented on the CPU");
  }
#ifdef WITH_CUDA
  return PSROIAlign_backward_cuda(
      grad,
      rois,
      mapping_channel,
      spatial_scale,
      pooled_height,
      pooled_width,
      sampling_ratio,
      batch_size,
      channels,
      height,
      width);
#else
  AT_ERROR("Not compiled with GPU support");
#endif
}

torchvision/csrc/PSROIPool.h

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
#pragma once

#include "cpu/vision_cpu.h"

#ifdef WITH_CUDA
#include "cuda/vision_cuda.h"
#endif

#ifdef WITH_CUDA
// CUDA implementation of the position-sensitive ROI Pool forward pass
// (defined in the CUDA sources). Returns a pair of tensors; the second one
// is fed back into the backward pass as `mapping_channel`, so it presumably
// records which input channel each pooled output came from — confirm against
// the kernel implementation.
std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
    const at::Tensor& input,
    const at::Tensor& rois,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width);

// CUDA implementation of the position-sensitive ROI Pool backward pass.
// `batch_size`, `channels`, `height`, `width` describe the shape of the
// original forward input so the gradient tensor can be reconstructed.
at::Tensor PSROIPool_backward_cuda(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& mapping_channel,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int batch_size,
    const int channels,
    const int height,
    const int width);
#endif
// Device dispatcher for the position-sensitive ROI Pool forward pass.
// Only a CUDA kernel exists for this op: CPU tensors, or CUDA tensors in a
// build without GPU support, raise an error instead of computing anything.
std::tuple<at::Tensor, at::Tensor> PSROIPool_forward(
    const at::Tensor& input,
    const at::Tensor& rois,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width) {
  // Guard clause: no CPU implementation is available.
  if (!input.type().is_cuda()) {
    AT_ERROR("Not implemented on the CPU");
  }
#ifdef WITH_CUDA
  return PSROIPool_forward_cuda(
      input, rois, spatial_scale, pooled_height, pooled_width);
#else
  AT_ERROR("Not compiled with GPU support");
#endif
}
46+
47+
// Device dispatcher for the position-sensitive ROI Pool backward pass.
// Mirrors PSROIPool_forward: only a CUDA kernel exists, so non-CUDA
// gradients (or CUDA gradients in a CPU-only build) raise an error.
at::Tensor PSROIPool_backward(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& mapping_channel,
    const float spatial_scale,
    const int pooled_height,
    const int pooled_width,
    const int batch_size,
    const int channels,
    const int height,
    const int width) {
  // Guard clause: no CPU implementation is available.
  if (!grad.type().is_cuda()) {
    AT_ERROR("Not implemented on the CPU");
  }
#ifdef WITH_CUDA
  return PSROIPool_backward_cuda(
      grad,
      rois,
      mapping_channel,
      spatial_scale,
      pooled_height,
      pooled_width,
      batch_size,
      channels,
      height,
      width);
#else
  AT_ERROR("Not compiled with GPU support");
#endif
}

0 commit comments

Comments
 (0)