// CompositeImplicitAutogradNestedTensorFunctions_inl.h
  1. #pragma once
  2. // @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
  3. // NB: The implementing C++ file is RegisterDispatchKey.cpp
  4. // The only #includes we need are for custom classes that have defaults in the C++ API
  5. #include <c10/core/MemoryFormat.h>
  6. #include <c10/core/Scalar.h>
  7. #include <ATen/core/Reduction.h>
  8. #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
  9. #error This change adds a dependency on all pytorch operators, meaning the \
  10. file will need to be re-compiled every time an operator is changed or added. \
  11. Consider including a specific operator from \
  12. <ATen/ops/{my_operator}_compositeimplicitautogradnestedtensor_dispatch.h>. \
  13. See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
  14. #endif
  15. #include <ATen/ops/reshape_compositeimplicitautogradnestedtensor_dispatch.h>
  16. #include <ATen/ops/reshape_as_compositeimplicitautogradnestedtensor_dispatch.h>
  17. #include <ATen/ops/unbind_compositeimplicitautogradnestedtensor_dispatch.h>