// BinaryOps.h
  1. #pragma once
  2. #include <ATen/core/TensorBase.h>
  3. #include <ATen/native/DispatchStub.h>
  4. #include <c10/core/Scalar.h>
  5. namespace at {
  6. struct TensorIterator;
  7. struct TensorIteratorBase;
  8. }
  9. namespace at { namespace native {
  10. inline void alpha_check(const ScalarType dtype, const Scalar& alpha) {
  11. TORCH_CHECK(! alpha.isBoolean() || dtype == ScalarType::Bool,
  12. "Boolean alpha only supported for Boolean results.");
  13. TORCH_CHECK(isFloatingType(dtype) || isComplexType(dtype)
  14. || alpha.isIntegral(true),
  15. "For integral input tensors, argument alpha must not be a floating point number.");
  16. TORCH_CHECK(isComplexType(dtype) || !alpha.isComplex(),
  17. "For non-complex input tensors, argument alpha must not be a complex number.")
  18. }
  19. // Basic checking for all sub functions.
  20. inline void sub_check(const TensorBase& self, const TensorBase& other) {
  21. TORCH_CHECK(self.scalar_type() != kBool || other.scalar_type() != kBool,
  22. "Subtraction, the `-` operator, with two bool tensors is not supported. "
  23. "Use the `^` or `logical_xor()` operator instead.")
  24. TORCH_CHECK(self.scalar_type() != kBool && other.scalar_type() != kBool,
  25. "Subtraction, the `-` operator, with a bool tensor is not supported. "
  26. "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
  27. }
  28. inline void sub_check(const TensorBase& self, const Scalar& scalar) {
  29. TORCH_CHECK(self.scalar_type() != kBool || !scalar.isBoolean(),
  30. "Subtraction, the `-` operator, with two bool tensors is not supported. "
  31. "Use the `^` or `logical_xor()` operator instead.")
  32. TORCH_CHECK(self.scalar_type() != kBool && !scalar.isBoolean(),
  33. "Subtraction, the `-` operator, with a bool tensor is not supported. "
  34. "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
  35. }
// Function-pointer signatures for binary element-wise kernels. The
// `structured_*` aliases take a TensorIteratorBase&; the remaining aliases
// take the TensorIterator type forward-declared at the top of this file.
using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
using structured_binary_fn_double = void(*)(TensorIteratorBase&, double);
using structured_binary_fn = void(*)(TensorIteratorBase&);
// NOTE(review): unlike binary_fn_double/binary_fn below, this alias takes
// TensorIteratorBase& rather than TensorIterator& — presumably intentional
// (it is used by logit_backward_stub); confirm against the kernel definitions.
using binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
using binary_fn_double = void(*)(TensorIterator&, double);
using binary_fn = void(*)(TensorIterator&);
// Signature used by add_clamp_stub: takes an alpha scaling factor plus
// min/max bound arguments.
using binary_clamp_fn_alpha =
    void(*)(TensorIterator&, const Scalar& alpha, const Scalar& min_val, const Scalar& max_val);
// NB: codegenned
// Dispatch-stub declarations (see DispatchStub.h); each stub is given its
// per-backend implementation elsewhere via the corresponding REGISTER/DEFINE
// macros — TODO confirm exact registration sites against the kernel files.

// Arithmetic.
DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
DECLARE_DISPATCH(binary_clamp_fn_alpha, add_clamp_stub);
DECLARE_DISPATCH(structured_binary_fn_alpha, sub_stub);
DECLARE_DISPATCH(structured_binary_fn, mul_stub);
// Division variants (true / floor / trunc rounding).
DECLARE_DISPATCH(structured_binary_fn, div_true_stub);
DECLARE_DISPATCH(structured_binary_fn, div_floor_stub);
DECLARE_DISPATCH(structured_binary_fn, div_trunc_stub);
DECLARE_DISPATCH(structured_binary_fn, atan2_stub);
DECLARE_DISPATCH(structured_binary_fn, remainder_stub);
// Bitwise / shift ops.
DECLARE_DISPATCH(structured_binary_fn, bitwise_and_stub);
DECLARE_DISPATCH(structured_binary_fn, bitwise_or_stub);
DECLARE_DISPATCH(structured_binary_fn, bitwise_xor_stub);
DECLARE_DISPATCH(structured_binary_fn, lshift_stub);
DECLARE_DISPATCH(structured_binary_fn, rshift_stub);
// Logical ops (non-structured: these take TensorIterator&).
DECLARE_DISPATCH(binary_fn, logical_xor_stub);
DECLARE_DISPATCH(binary_fn, logical_and_stub);
DECLARE_DISPATCH(binary_fn, logical_or_stub);
// Comparisons.
DECLARE_DISPATCH(structured_binary_fn, lt_stub);
DECLARE_DISPATCH(structured_binary_fn, le_stub);
DECLARE_DISPATCH(structured_binary_fn, gt_stub);
DECLARE_DISPATCH(structured_binary_fn, ge_stub);
DECLARE_DISPATCH(structured_binary_fn, eq_stub);
DECLARE_DISPATCH(structured_binary_fn, ne_stub);
// Element-wise min/max families.
DECLARE_DISPATCH(binary_fn, max_elementwise_stub);
DECLARE_DISPATCH(binary_fn, min_elementwise_stub);
DECLARE_DISPATCH(structured_binary_fn, maximum_stub);
DECLARE_DISPATCH(structured_binary_fn, minimum_stub);
DECLARE_DISPATCH(structured_binary_fn, fmax_stub);
DECLARE_DISPATCH(structured_binary_fn, fmin_stub);
// Loss kernels and backward passes.
DECLARE_DISPATCH(structured_binary_fn_double, smooth_l1_stub);
DECLARE_DISPATCH(binary_fn_double, huber_stub);
DECLARE_DISPATCH(structured_binary_fn, sigmoid_backward_stub);
DECLARE_DISPATCH(binary_fn_alpha, logit_backward_stub);
DECLARE_DISPATCH(structured_binary_fn, tanh_backward_stub);
DECLARE_DISPATCH(structured_binary_fn, mse_stub);
DECLARE_DISPATCH(structured_binary_fn, fmod_stub);
DECLARE_DISPATCH(structured_binary_fn, logaddexp_stub);
DECLARE_DISPATCH(structured_binary_fn, logaddexp2_stub);
DECLARE_DISPATCH(structured_binary_fn, gcd_stub);
DECLARE_DISPATCH(structured_binary_fn, lcm_stub);
DECLARE_DISPATCH(structured_binary_fn, hypot_stub);
// Special math functions.
DECLARE_DISPATCH(structured_binary_fn, igamma_stub);
DECLARE_DISPATCH(structured_binary_fn, igammac_stub);
DECLARE_DISPATCH(structured_binary_fn, nextafter_stub);
DECLARE_DISPATCH(structured_binary_fn, heaviside_stub);
DECLARE_DISPATCH(structured_binary_fn, copysign_stub);
DECLARE_DISPATCH(structured_binary_fn, xlogy_stub);
DECLARE_DISPATCH(structured_binary_fn, xlog1py_stub);
DECLARE_DISPATCH(structured_binary_fn, zeta_stub);
// Orthogonal-polynomial evaluation.
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_t_stub);
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_u_stub);
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_v_stub);
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_w_stub);
DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_h_stub);
DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_he_stub);
DECLARE_DISPATCH(structured_binary_fn, laguerre_polynomial_l_stub);
DECLARE_DISPATCH(structured_binary_fn, legendre_polynomial_p_stub);
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_t_stub);
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_u_stub);
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_v_stub);
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_w_stub);
  106. }} // namespace at::native