// GridSamplerUtils.h
  1. #pragma once
  2. // See NOTE: [Tensor vs. TensorBase]
  3. // https://github.com/pytorch/pytorch/pull/66979
  4. #include <ATen/core/TensorBase.h>
  5. #include <ATen/native/TensorProperties.h>
  6. #include <ATen/native/CanUse32BitIndexMath.h>
  7. namespace at { namespace native {
namespace detail {
// Interpolation algorithm used by grid_sampler to sample input values
// at the (fractional) coordinates given by the grid.
enum class GridSamplerInterpolation {Bilinear, Nearest, Bicubic};
// How grid coordinates that fall outside the input are handled:
// Zeros pads with 0, Border clamps to the edge, Reflection mirrors
// across the boundary.
enum class GridSamplerPadding {Zeros, Border, Reflection};
} // namespace detail
// Re-export the enums into at::native so callers don't spell out detail::.
using detail::GridSamplerInterpolation;
using detail::GridSamplerPadding;
  14. namespace {
  15. // See NOTE [ grid_sampler Native Functions ].
  16. void check_grid_sampler_common(
  17. const TensorBase& input,
  18. const TensorBase& grid
  19. ) {
  20. auto input_opt = input.options();
  21. auto grid_opt = grid.options();
  22. TORCH_CHECK(
  23. input.defined(),
  24. "grid_sampler(): expected input to not be undefined");
  25. TORCH_CHECK(
  26. grid.defined(),
  27. "grid_sampler(): expected grid to not be undefined");
  28. TORCH_CHECK(
  29. input_opt.device() == grid_opt.device(),
  30. "grid_sampler(): expected input and grid to be on same device, but input "
  31. "is on ", input_opt.device(), " and grid is on ", grid_opt.device());
  32. TORCH_CHECK(
  33. input_opt.layout() == kStrided && grid_opt.layout() == kStrided,
  34. "grid_sampler(): expected input and grid to have torch.strided layout, but "
  35. "input has ", input_opt.layout(), " and grid has ", grid_opt.layout());
  36. TORCH_CHECK(
  37. input.size(0) == grid.size(0),
  38. "grid_sampler(): expected grid and input to have same batch size, but got "
  39. "input with sizes ", input.sizes(), " and grid with sizes ", grid.sizes());
  40. TORCH_CHECK(
  41. grid.size(-1) == input.dim() - 2,
  42. "grid_sampler(): expected grid to have size ", input.dim() - 2, " in last "
  43. "dimension, but got grid with sizes ", grid.sizes());
  44. for (const auto i : c10::irange(2, input.dim())) {
  45. TORCH_CHECK(input.size(i) > 0,
  46. "grid_sampler(): expected input to have non-empty spatial dimensions, "
  47. "but input has sizes ", input.sizes(), " with dimension ", i, " being "
  48. "empty");
  49. }
  50. }
  51. // See NOTE [ grid_sampler Native Functions ].
  52. void check_grid_sampler_2d(
  53. const TensorBase& input,
  54. const TensorBase& grid
  55. ) {
  56. TORCH_CHECK(
  57. input.dim() == 4 && input.dim() == grid.dim(),
  58. "grid_sampler(): expected 4D input and grid with same number of "
  59. "dimensions, but got input with sizes ", input.sizes(),
  60. " and grid with sizes ", grid.sizes());
  61. }
  62. // See NOTE [ grid_sampler Native Functions ].
  63. void check_grid_sampler_3d(
  64. const TensorBase& input,
  65. const TensorBase& grid,
  66. int64_t interpolation_mode
  67. ) {
  68. TORCH_CHECK(
  69. input.dim() == 5 && input.dim() == grid.dim(),
  70. "grid_sampler(): expected 5D input and grid with same number of "
  71. "dimensions, but got input with sizes ", input.sizes(),
  72. " and grid with sizes ", grid.sizes());
  73. TORCH_CHECK(
  74. !(input.dim() == 5 &&
  75. static_cast<GridSamplerInterpolation>(interpolation_mode) ==
  76. GridSamplerInterpolation::Bicubic),
  77. "grid_sampler(): bicubic interpolation only supports 4D input");
  78. }
  79. // See NOTE [ grid_sampler Native Functions ].
  80. // cudnn does not support inputs larger than 1024.
  81. bool cond_cudnn_grid_sampler(
  82. const TensorBase& input,
  83. const TensorBase& grid
  84. ) {
  85. return (
  86. at::native::cudnn_is_acceptable(input) &&
  87. at::native::cudnn_is_acceptable(grid) &&
  88. at::native::canUse32BitIndexMath(input) &&
  89. at::native::canUse32BitIndexMath(grid) &&
  90. input.dim() == 4 &&
  91. input.sym_size(1) <= 1024);
  92. }
  93. } // anonymous namespace
  94. }} // namespace at::native