#pragma once

// ${generated_comment}

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml,            \
  meaning the file will need to be re-compiled every time an operator     \
  is changed or added. Consider if your change would be better placed in  \
  another file, or if a more specific header might achieve the same goal. \
  See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the     \
  file will need to be re-compiled every time an operator is changed or added. \
  Consider including a specific operator from <ATen/ops/{my_operator}.h> and   \
  see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif
// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
//
// In ATen, certain generated header files include the definitions of
// every single operator in PyTorch. Unfortunately this means that every
// time an operator signature is updated or changed in
// native_functions.yaml, you (and every other PyTorch developer) need
// to recompile every source file that includes any of these headers.
//
// To break up these header dependencies and improve incremental build
// times for all PyTorch developers, these headers are split into
// per-operator headers in the `ATen/ops` folder. This limits
// incremental rebuilds to changes to methods of `Tensor`, or to files
// that use the specific operator being changed. With `at::sum` as an
// example, you should include
//
//   <ATen/ops/sum.h>               // instead of ATen/Functions.h
//   <ATen/ops/sum_native.h>        // instead of ATen/NativeFunctions.h
//   <ATen/ops/sum_ops.h>           // instead of ATen/Operators.h
//   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
//
// However, even if you're careful to use per-operator headers in your
// own code, `Functions.h` might be included indirectly through another
// header without you realising. To avoid this, you can add
//
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//
// to the top of your source file. This way, any time one of the
// non-specific headers is included, the compiler will error out.
//
// Also, be aware that the `ops` headers are not available in all build
// configurations (namely fb-internal), so you must guard these
// includes with `#ifdef AT_PER_OPERATOR_HEADERS`, e.g.
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
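//
// Putting these pieces together, a source file that only needs
// `at::sum` might look like the following sketch (the file name
// `my_kernel.cpp` and the function `sum_all` are hypothetical,
// purely for illustration):
//
//   // my_kernel.cpp
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//   #include <ATen/core/Tensor.h>
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
//
//   // Sums all elements; this file only depends on the `sum` operator
//   // header, so edits to unrelated operators don't trigger a rebuild.
//   at::Tensor sum_all(const at::Tensor& t) {
//     return at::sum(t);
//   }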

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/SymInt.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/util/OptionalArrayRef.h>

#include <ATen/ops/from_blob.h>
#include <ATen/ops/tensor.h>

${Functions_includes}

namespace at {

${Functions_declarations}

// Special C++-only overloads for std()-like functions (see gh-40287).
// These are needed because int -> bool conversion takes precedence over
// int -> IntArrayRef, so, for example, std(0) would otherwise select
// the std(unbiased=False) overload.
TORCH_API inline Tensor var(const Tensor& self, int dim) {
  return at::var(self, IntArrayRef{dim});
}

TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
  return at::var_mean(self, IntArrayRef{dim});
}

TORCH_API inline Tensor std(const Tensor& self, int dim) {
  return at::std(self, IntArrayRef{dim});
}

TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
  return at::std_mean(self, IntArrayRef{dim});
}
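
// For example, given the overloads above, the following call reduces
// over dimension 0 instead of resolving to std(t, /*unbiased=*/false)
// (an illustrative sketch; `t` is a hypothetical tensor):
//
//   at::Tensor t = at::rand({3, 4});
//   at::Tensor s = at::std(t, 0);  // same as at::std(t, IntArrayRef{0})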

// Convenience free functions that forward to the Tensor method of the
// same name.
inline int64_t numel(const Tensor& tensor) {
  return tensor.numel();
}

inline int64_t size(const Tensor& tensor, int64_t dim) {
  return tensor.size(dim);
}

inline int64_t stride(const Tensor& tensor, int64_t dim) {
  return tensor.stride(dim);
}

inline bool is_complex(const Tensor& tensor) {
  return tensor.is_complex();
}

inline bool is_floating_point(const Tensor& tensor) {
  return tensor.is_floating_point();
}

inline bool is_signed(const Tensor& tensor) {
  return tensor.is_signed();
}

inline bool is_inference(const Tensor& tensor) {
  return tensor.is_inference();
}

inline bool _is_zerotensor(const Tensor& tensor) {
  return tensor._is_zerotensor();
}

inline bool is_conj(const Tensor& tensor) {
  return tensor.is_conj();
}

inline Tensor conj(const Tensor& tensor) {
  return tensor.conj();
}

inline bool is_neg(const Tensor& tensor) {
  return tensor.is_neg();
}

} // namespace at