// Operators.h (codegen template)
#pragma once
// ${generated_comment}

// Including this header pulls in declarations for all ATen operators, which
// means a dependency on native_functions.yaml. The guards below let build
// configurations opt out of that dependency and fail fast at compile time.
#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if your change would be better placed in \
another file, or if a more specific header might achieve the same goal. \
See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from <ATen/ops/{my_operator}_ops.h> \
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

// Vocabulary types referenced by the generated operator signatures.
#include <c10/core/SymInt.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/Scalar.h>
#include <c10/core/TensorOptions.h>
#include <c10/core/QScheme.h>
#include <c10/util/OptionalArrayRef.h>
#include <tuple>
#include <vector>

// Filled in by codegen with one per-operator include each.
${Operators_includes}
  25. // Extension writers: do you write wrapper functions? Are you frustrated with
  26. // resolving overloads of operators? Are you frustrated with dealing with
  27. // pointer-to-methods and resolving overloads of pointer-to-methods?? Look no
  28. // further, this is the utility for you.
  29. //
  30. // Given an operator schema: aten::op.overload(...
  31. //
  32. // Use ATEN_FN2(op, overload) to get a *function* version of the operator
  33. // that is guaranteed to not be overloaded. This means that you can safely
  34. // decltype(&ATEN_FN2(op, overload)) it. NB: the 2 means this macro takes 2 args.
  35. //
  36. // Given an operator schema without an overload name: aten::op(...
  37. //
  38. // Use ATEN_FN(op) to get an unambiguous *function* version of the operator.
  39. //
  40. // There is some interesting behavior for out= operations.
  41. // ATEN_FN2(sin, out) gives a function that is *faithful* to the schema;
  42. // that is, the order of arguments is exactly what it looks like in the schema.
  43. #define ATEN_FN2(op_name, overload) at::_ops::op_name##_##overload::call
  44. #define ATEN_FN(op_name) at::_ops::op_name::call
  45. // Separately, ATEN_OP(op) and ATEN_OP2(op, overload) define a class containing compile-time
  46. // metadata about a given aten operator.
  47. // Notable data on the class includes:
  48. // - ATEN_OP2(add, Tensor)::name // returns the string name: "add"
  49. // - ATEN_OP2(add, Tensor)::overload_name // returns the string overload name: "Tensor"
  50. // - ATEN_OP2(add, Tensor)::schema // returns the C++ schema type: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
  51. // - ATEN_OP2(add, Tensor)::schema_str // returns the string jit type: "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
  52. #define ATEN_OP2(op_name, overload) at::_ops::op_name##_##overload
  53. #define ATEN_OP(op_name) at::_ops::op_name
// WARNING: Please do not call any of the ops in the _ops namespace directly.
// Use the ATEN_FN macros. We do not guarantee stability of the naming
// scheme for the functions in at::_ops
// See Note [The ATen Operators API] for details of the at::_ops namespace
namespace at {
namespace _ops {

// Filled in by codegen: one declaration per operator/overload.
${Operators_declarations}

} // namespace _ops
} // namespace at