batch_norm.h

#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/native/DispatchStub.h>

namespace at {
namespace native {
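
// Function-pointer signatures for the CPU batch norm kernels: forward,
// statistics collection, and backward.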
using batch_norm_fn = void (*)(Tensor&, const Tensor&, const Tensor&,
    const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, bool, double);
using batch_norm_collect_stats_fn = void (*)(Tensor&, Tensor&, const Tensor&);
using batch_norm_backward_fn = void (*)(Tensor&, Tensor&, Tensor&, const Tensor&,
    const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, bool, double);
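
// CPU dispatch stubs; the kernel implementations register themselves
// against these (via REGISTER_DISPATCH in the corresponding .cpp files).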
DECLARE_DISPATCH(batch_norm_fn, batch_norm_cpu_stub);
DECLARE_DISPATCH(batch_norm_collect_stats_fn, batch_norm_cpu_collect_stats_stub);
DECLARE_DISPATCH(batch_norm_backward_fn, batch_norm_cpu_backward_stub);

// TensorAccessor when it is defined to work around undefined...
template <typename scalar_t>
static TensorAccessor<scalar_t, 1> conditional_accessor_1d(const Tensor& t) {
  if (!t.defined()) {
    return TensorAccessor<scalar_t, 1>(nullptr, nullptr, nullptr);
  }
  return t.accessor<scalar_t, 1>();
}
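
// Returns the raw data pointer of an optional tensor, or nullptr when the
// tensor is undefined.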
template <typename scalar_t>
static scalar_t* conditional_data_ptr(const Tensor& t) {
  return t.defined() ? t.contiguous().data_ptr<scalar_t>()
                     : nullptr;
}

} // namespace native
} // namespace at
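
For illustration only (not part of the header): a minimal sketch of how the conditional helpers behave for defined versus undefined tensors. It assumes a build where this internal ATen header is on the include path; the function name sketch() is made up for the example.

#include <ATen/ATen.h>
#include <ATen/native/batch_norm.h>

void sketch() {
  at::Tensor weight = at::ones({4});  // defined 1-D float tensor
  at::Tensor bias;                    // default-constructed, i.e. undefined
  // Defined tensor: a valid data pointer into contiguous storage.
  float* w = at::native::conditional_data_ptr<float>(weight);
  // Undefined tensor: nullptr, so a kernel can skip the optional term.
  float* b = at::native::conditional_data_ptr<float>(bias);
  (void)w;
  (void)b;
}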