nanquantile_compositeimplicitautograd_dispatch.h

#pragma once

// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {
namespace compositeimplicitautograd {

TORCH_API at::Tensor nanquantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear");
TORCH_API at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear");
TORCH_API at::Tensor & nanquantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out);
TORCH_API at::Tensor nanquantile(const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear");
TORCH_API at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear");
TORCH_API at::Tensor & nanquantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out);

} // namespace compositeimplicitautograd
} // namespace at
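
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated header). The
// declarations above back the public at::nanquantile overloads; assuming a
// working ATen/libtorch build, calls might look like the hypothetical
// nanquantile_example() below. Shapes and values are made up for illustration.
//
//   #include <ATen/ATen.h>
//
//   void nanquantile_example() {
//     at::Tensor t = at::rand({3, 4});                        // float values in [0, 1)
//
//     // Functional form: NaN entries are ignored; keepdim=false and "linear"
//     // interpolation come from the defaults declared above.
//     at::Tensor q50 = at::nanquantile(t, /*q=*/0.5, /*dim=*/1);   // shape {3}
//
//     // out= form: the _outf spelling takes the output tensor last,
//     // with all other arguments spelled out explicitly.
//     at::Tensor out = at::empty({3});
//     at::nanquantile_outf(t, /*q=*/0.5, /*dim=*/1, /*keepdim=*/false,
//                          /*interpolation=*/"linear", out);
//   }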