GridSampler.h
#pragma once

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>

#include <ATen/native/GridSamplerUtils.h>

namespace at { namespace native {

using detail::GridSamplerInterpolation;
using detail::GridSamplerPadding;

// Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value,
// where we view each pixel as an area between (idx - 0.5) and (idx + 0.5).
// if align_corners: -1 and +1 get sent to the centers of the corner pixels
//     -1 --> 0
//     +1 --> (size - 1)
//     scale_factor = (size - 1) / 2
// if not align_corners: -1 and +1 get sent to the image edges
//     -1 --> -0.5
//     +1 --> (size - 1) + 0.5 == size - 0.5
//     scale_factor = size / 2
template <typename scalar_t>
static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size,
                                                bool align_corners) {
  if (align_corners) {
    // unnormalize coord from [-1, 1] to [0, size - 1]
    return ((coord + 1) / 2) * (size - 1);
  } else {
    // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
    return ((coord + 1) * size - 1) / 2;
  }
}
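// Example: grid_sampler_unnormalize(0.5, /*size=*/5, /*align_corners=*/true)
// returns ((0.5 + 1) / 2) * 4 = 3.0, while with align_corners=false it
// returns ((0.5 + 1) * 5 - 1) / 2 = 3.25.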
// grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize
// except that it also returns the `d output / d input` via pointer argument
// `grad_in`.
// This is useful in the backward pass of grid_sampler.
template <typename scalar_t>
static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size,
                                                         bool align_corners, scalar_t *grad_in) {
  if (align_corners) {
    // unnormalize coord from [-1, 1] to [0, size - 1]
    *grad_in = static_cast<scalar_t>(size - 1) / 2;
    return ((coord + 1) / 2) * (size - 1);
  } else {
    // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
    *grad_in = static_cast<scalar_t>(size) / 2;
    return ((coord + 1) * size - 1) / 2;
  }
}
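// Since unnormalization is affine in `coord`, `grad_in` is just the constant
// scale_factor described above: (size - 1) / 2 or size / 2.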
// Clips coordinates to between 0 and clip_limit - 1
template<typename scalar_t>
static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) {
  return std::min(static_cast<scalar_t>(clip_limit - 1),
                  std::max(in, static_cast<scalar_t>(0)));
}

// clip_coordinates_set_grad works similarly to clip_coordinates except that
// it also returns the `d output / d input` via pointer argument `grad_in`.
// This is useful in the backward pass of grid_sampler.
template<typename scalar_t>
static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit,
                                                 scalar_t *grad_in) {
  // Note that it is important for the gradient calculation that borders
  // are considered out of bounds.
  if (in <= static_cast<scalar_t>(0)) {
    *grad_in = static_cast<scalar_t>(0);
    return static_cast<scalar_t>(0);
  } else {
    scalar_t max = static_cast<scalar_t>(clip_limit - 1);
    if (in >= max) {
      *grad_in = static_cast<scalar_t>(0);
      return max;
    } else {
      *grad_in = static_cast<scalar_t>(1);
      return in;
    }
  }
}
// Reflects coordinates until they fall between low and high (inclusive).
// The bounds are passed as twice their value so that half-integer values
// can be represented as ints.
template<typename scalar_t>
static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low,
                                           int64_t twice_high) {
  if (twice_low == twice_high) {
    return static_cast<scalar_t>(0);
  }
  scalar_t min = static_cast<scalar_t>(twice_low) / 2;
  scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
  in = std::fabs(in - min);
  // `fmod` returns same sign as `in`, which is positive after the `fabs` above.
  scalar_t extra = std::fmod(in, span);
  int flips = static_cast<int>(std::floor(in / span));
  if (flips % 2 == 0) {
    return extra + min;
  } else {
    return span - extra + min;
  }
}
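// Example: reflect_coordinates(5.5, /*twice_low=*/0, /*twice_high=*/8) reflects
// into [0, 4]: span = 4, extra = 1.5, flips = 1 (odd), so the result is
// 4 - 1.5 = 2.5.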
// reflect_coordinates_set_grad works similarly to reflect_coordinates except
// that it also returns the `d output / d input` via pointer argument
// `grad_in`.
// This is useful in the backward pass of grid_sampler.
template<typename scalar_t>
static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low,
                                                    int64_t twice_high, scalar_t *grad_in) {
  if (twice_low == twice_high) {
    *grad_in = static_cast<scalar_t>(0);
    return static_cast<scalar_t>(0);
  }
  int grad_in_mult_;
  scalar_t min = static_cast<scalar_t>(twice_low) / 2;
  scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
  in = in - min;
  if (in < static_cast<scalar_t>(0)) {
    grad_in_mult_ = -1;
    in = -in;
  } else {
    grad_in_mult_ = 1;
  }
  // `fmod` returns same sign as `in`, which is positive after the `if` above.
  scalar_t extra = std::fmod(in, span);
  int flips = static_cast<int>(std::floor(in / span));
  if (flips % 2 == 0) {
    *grad_in = static_cast<scalar_t>(grad_in_mult_);
    return extra + min;
  } else {
    *grad_in = static_cast<scalar_t>(-grad_in_mult_);
    return span - extra + min;
  }
}
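// The returned gradient is always +1 or -1: reflection is piecewise linear with
// unit slope, and the sign flips once per reflection and once more if `in`
// started below `min`.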
// Maps out-of-boundary points back into the boundary.
// This only affects padding_mode == Border or Reflection.
template<typename scalar_t>
static inline scalar_t compute_coordinates(scalar_t coord, int64_t size,
                                           GridSamplerPadding padding_mode,
                                           bool align_corners) {
  if (padding_mode == GridSamplerPadding::Border) {
    // clip coordinates to image borders
    coord = clip_coordinates(coord, size);
  } else if (padding_mode == GridSamplerPadding::Reflection) {
    // reflect coordinates by image borders
    if (align_corners) {
      coord = reflect_coordinates(coord, 0, 2*(size - 1));
    } else {
      coord = reflect_coordinates(coord, -1, 2*size - 1);
    }
    // clip coordinates to image borders
    coord = clip_coordinates(coord, size);
  }
  return coord;
}
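// With GridSamplerPadding::Zeros the coordinate passes through unchanged; any
// sample that is still out of bounds is handled later by the within_bounds_*
// checks further down.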
// Computes the pixel source index value for a grid coordinate
template <typename scalar_t>
static inline scalar_t grid_sampler_compute_source_index(
    scalar_t coord,
    int64_t size,
    GridSamplerPadding padding_mode,
    bool align_corners) {
  coord = grid_sampler_unnormalize(coord, size, align_corners);
  coord = compute_coordinates(coord, size, padding_mode, align_corners);
  return coord;
}
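// Example: grid_sampler_compute_source_index(1.2, /*size=*/4,
// GridSamplerPadding::Reflection, /*align_corners=*/true) unnormalizes 1.2 to
// 3.3, reflects it within [0, 3] to 2.7, and returns 2.7 after clipping.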
// grid_sampler_compute_source_index_set_grad works similarly to
// grid_sampler_compute_source_index except that it also returns the
// `d output / d input` via pointer argument `grad_in`.
// This is useful in the backward pass of grid_sampler.
template <typename scalar_t>
static inline scalar_t grid_sampler_compute_source_index_set_grad(
    scalar_t coord,
    int64_t size,
    GridSamplerPadding padding_mode,
    bool align_corners,
    scalar_t *grad_in) {
  scalar_t grad_clip, grad_refl;
  coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in);
  if (padding_mode == GridSamplerPadding::Border) {
    // clip coordinates to image borders
    coord = clip_coordinates_set_grad(coord, size, &grad_clip);
    *grad_in = (*grad_in) * grad_clip;
  } else if (padding_mode == GridSamplerPadding::Reflection) {
    // reflect coordinates by image borders
    if (align_corners) {
      coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl);
    } else {
      coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl);
    }
    // clip coordinates to image borders
    coord = clip_coordinates_set_grad(coord, size, &grad_clip);
    *grad_in = (*grad_in) * grad_refl * grad_clip;
  }
  return coord;
}
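// The overall `grad_in` is the chain-rule product of the unnormalize gradient
// with the reflect and/or clip gradients applied above.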
static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) {
  return h >= 0 && h < H && w >= 0 && w < W;
}

static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) {
  return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W;
}
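// Fetches data[y][x] (with strides sH, sW) after resolving (x, y) through the
// padding mode; points that still fall out of bounds read as 0.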
template<typename scalar_t>
static inline scalar_t get_value_bounded(
    scalar_t* data,
    scalar_t x,
    scalar_t y,
    int64_t W,
    int64_t H,
    int64_t sW,
    int64_t sH,
    GridSamplerPadding padding_mode,
    bool align_corners) {
  x = compute_coordinates(x, W, padding_mode, align_corners);
  y = compute_coordinates(y, H, padding_mode, align_corners);

  int64_t ix = static_cast<int64_t>(x);
  int64_t iy = static_cast<int64_t>(y);

  if (within_bounds_2d(iy, ix, H, W)) {
    return data[iy * sH + ix * sW];
  }
  return static_cast<scalar_t>(0);
}
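// safe_add_2d / safe_add_3d accumulate `delta` only when the index is in
// bounds; out-of-bounds writes are silently dropped, which is how gradients
// are scattered safely in the backward pass.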
template<typename scalar_t>
static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w,
                               int64_t sH, int64_t sW, int64_t H, int64_t W,
                               scalar_t delta) {
  if (within_bounds_2d(h, w, H, W)) {
    data[h * sH + w * sW] += delta;
  }
}

template<typename scalar_t>
static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w,
                               int64_t sD, int64_t sH, int64_t sW,
                               int64_t D, int64_t H, int64_t W,
                               scalar_t delta) {
  if (within_bounds_3d(d, h, w, D, H, W)) {
    data[d * sD + h * sH + w * sW] += delta;
  }
}
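// Backward counterpart of get_value_bounded: resolves (x, y) through the same
// padding mode and accumulates `delta` at the resulting location if it is in
// bounds.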
template<typename scalar_t>
static inline void add_value_bounded(
    scalar_t* data,
    scalar_t x,
    scalar_t y,
    int64_t W,
    int64_t H,
    int64_t sW,
    int64_t sH,
    scalar_t delta,
    GridSamplerPadding padding_mode,
    bool align_corners) {
  x = compute_coordinates(x, W, padding_mode, align_corners);
  y = compute_coordinates(y, H, padding_mode, align_corners);

  int64_t ix = static_cast<int64_t>(x);
  int64_t iy = static_cast<int64_t>(y);

  safe_add_2d(data, iy, ix, sH, sW, H, W, delta);
}
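// The helper below is used by the bicubic backward path. `t` is the fractional
// part of the source coordinate; the four coefficients correspond to the taps
// at offsets -1, 0, +1, and +2 from its integer part.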
// Calculate the differential of the cubic convolution, i.e. `d coeff / d x`
template<typename scalar_t>
static inline void get_cubic_coefficients_grad(
    scalar_t coeffs[4],
    scalar_t t) {
  // Must be the same as forward calculation in
  // aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients
  scalar_t A = -0.75;

  scalar_t x;
  x = -1 - t; // 1 < x = |-1 - tx| < 2
  coeffs[0] = (-3 * A * x - 10 * A) * x - 8 * A;
  x = -t;     // x = |0 - tx| <= 1
  coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x;
  x = 1 - t;  // x = |1 - tx| <= 1
  coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x;
  x = 2 - t;  // 1 < x = |2 - tx| < 2
  coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A;
}

}} // namespace at::native