#pragma once

// @generated by torchgen/gen.py from Operator.h

#include <tuple>
#include <vector>

// Forward declarations of any types needed in the operator signatures.
// We can't include the headers that define these classes directly, because
// doing so would create circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {
namespace _ops {
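
// Each struct below is one registered overload of aten::div. `call` enters
// the dispatcher from the top, while `redispatch` continues dispatch from a
// caller-supplied DispatchKeySet, so a kernel can forward to the next
// backend without re-running keys that already fired. (This description of
// the call/redispatch split reflects the usual dispatcher contract; the
// generated file itself does not spell it out.)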
struct TORCH_API div_Tensor {
  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Tensor(Tensor self, Tensor other) -> Tensor")
  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
};
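
// Illustrative usage sketch (not part of the generated header; assumes
// <ATen/ATen.h> is included): at::div(a, b) resolves to this functional
// overload, so calling the struct's entry point directly is equivalent:
//
//   at::Tensor a = at::rand({2, 2});
//   at::Tensor b = at::rand({2, 2});
//   at::Tensor c = at::_ops::div_Tensor::call(a, b);  // same as at::div(a, b)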

struct TORCH_API div__Tensor {
  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
};
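
// In the schema string above, `Tensor(a!)` is the alias annotation used by
// torch schemas: `self` is mutated in place and the returned reference
// aliases it, which is why `call` both takes and returns `at::Tensor &`.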

struct TORCH_API div_out {
  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
};
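
// Illustrative sketch of the out variant (not part of the generated header):
// the caller allocates the result tensor, which is passed as the trailing
// `out` argument of `call`:
//
//   at::Tensor out = at::empty({2, 2});
//   at::_ops::div_out::call(a, b, out);  // same as at::div_out(out, a, b)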

struct TORCH_API div_Tensor_mode {
  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_mode")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor")
  static at::Tensor call(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode);
};
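
// The optional rounding_mode selects the division flavor (a PyTorch-level
// convention, not stated in this header): c10::nullopt performs true
// division, "trunc" rounds toward zero, and "floor" rounds toward negative
// infinity. For example:
//
//   at::Tensor q = at::_ops::div_Tensor_mode::call(a, b, "floor");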

struct TORCH_API div__Tensor_mode {
  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_mode")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode);
};

struct TORCH_API div_out_mode {
  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out_mode")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out);
};

struct TORCH_API div_Scalar {
  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar(Tensor self, Scalar other) -> Tensor")
  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
};
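
// Illustrative sketch (not part of the generated header): the Scalar
// overload accepts anything convertible to at::Scalar, e.g. a plain double:
//
//   at::Tensor half = at::_ops::div_Scalar::call(a, 2.0);  // same as a / 2.0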

struct TORCH_API div__Scalar {
  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
};

struct TORCH_API div_Scalar_mode {
  using schema = at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor")
  static at::Tensor call(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode);
};

struct TORCH_API div__Scalar_mode {
  using schema = at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode);
};

struct TORCH_API div_Scalar_out {
  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
};

struct TORCH_API div_Scalar_mode_out {
  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out);
};

}} // namespace at::_ops
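
// Hedged sketch of how a kernel might use `redispatch` (illustrative; the
// key-set arithmetic below reflects common usage in generated autograd
// kernels and is an assumption, not something this header specifies):
//
//   at::Tensor my_div_wrapper(c10::DispatchKeySet ks,
//                             const at::Tensor & self,
//                             const at::Tensor & other) {
//     // Continue dispatch below the autograd keys instead of starting over.
//     return at::_ops::div_Tensor::redispatch(
//         ks & c10::after_autograd_keyset, self, other);
//   }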