// AdaptivePooling.h — dispatch declarations and index helpers for
// adaptive average / max pooling (2d) CPU kernels.
  1. #pragma once
  2. #include <ATen/core/Tensor.h>
  3. #include <ATen/native/DispatchStub.h>
  4. #include <c10/util/ArrayRef.h>
  5. #include <c10/util/irange.h>
  6. #include <cmath>
  7. namespace at {
  8. namespace native {
  9. using adaptive_avg_pooling_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
  10. using adaptive_avg_pooling_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
  11. DECLARE_DISPATCH(adaptive_avg_pooling_fn, adaptive_avg_pool2d_kernel);
  12. DECLARE_DISPATCH(adaptive_avg_pooling_backward_fn, adaptive_avg_pool2d_backward_kernel);
  13. using adaptive_max_pooling_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
  14. using adaptive_max_pooling_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
  15. DECLARE_DISPATCH(adaptive_max_pooling_fn, adaptive_max_pool2d_kernel);
  16. DECLARE_DISPATCH(adaptive_max_pooling_backward_fn, adaptive_max_pool2d_backward_kernel);
  17. static inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
  18. return (a / b) * c + ((a % b) * c) / b;
  19. }
  20. static inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
  21. return 1 + ((a + 1) * c - 1) / b;
  22. }
  23. static inline void adaptive_pool_empty_output_check(const Tensor& gradOutput_, const char* arg_name) {
  24. int64_t ndim = gradOutput_.ndimension();
  25. for (const auto i : c10::irange(1, ndim)) {
  26. TORCH_CHECK(gradOutput_.size(i) > 0,
  27. arg_name, "(): Expected grad_output to have non-zero size for non-batch dimensions, "
  28. "but grad_output has sizes ", gradOutput_.sizes(), " with dimension ", i,
  29. " being empty");
  30. }
  31. }
  32. }} // namespace at::native