#pragma once

// @generated by torchgen/gen.py from Operator.h

#include <tuple>
#include <vector>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {
namespace _ops {

struct TORCH_API randint {
  using schema = at::Tensor (int64_t, c10::SymIntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
  static at::Tensor call(int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
};
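
// Illustrative sketch, not part of the generated output: these structs are the
// dispatcher's unboxed entry points, and user code normally reaches them through
// at::randint rather than directly. A direct call might look like the following
// (the size vector and literal arguments are assumptions for the example):
//
//   std::vector<c10::SymInt> size = {2, 3};
//   at::Tensor t = at::_ops::randint::call(
//       /*high=*/10, size,
//       /*dtype=*/at::kLong, /*layout=*/c10::nullopt,
//       /*device=*/c10::nullopt, /*pin_memory=*/c10::nullopt);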

struct TORCH_API randint_generator {
  using schema = at::Tensor (int64_t, c10::SymIntArrayRef, c10::optional<at::Generator>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
  static at::Tensor call(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
};

struct TORCH_API randint_low {
  using schema = at::Tensor (int64_t, int64_t, c10::SymIntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
  static at::Tensor call(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
};

struct TORCH_API randint_low_generator {
  using schema = at::Tensor (int64_t, int64_t, c10::SymIntArrayRef, c10::optional<at::Generator>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_generator")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
  static at::Tensor call(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
};
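
// Illustrative sketch, not part of the generated output: the *generator
// overloads thread an explicit RNG through the call. Assuming a CPU generator
// seeded for the example:
//
//   at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
//   std::vector<c10::SymInt> size = {4};
//   at::Tensor t = at::_ops::randint_low_generator::call(
//       /*low=*/0, /*high=*/100, size, gen,
//       /*dtype=*/at::kLong, /*layout=*/c10::nullopt,
//       /*device=*/c10::nullopt, /*pin_memory=*/c10::nullopt);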

struct TORCH_API randint_out {
  using schema = at::Tensor & (int64_t, c10::SymIntArrayRef, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(int64_t high, c10::SymIntArrayRef size, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t high, c10::SymIntArrayRef size, at::Tensor & out);
};

struct TORCH_API randint_generator_out {
  using schema = at::Tensor & (int64_t, c10::SymIntArrayRef, c10::optional<at::Generator>, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
};

struct TORCH_API randint_low_out {
  using schema = at::Tensor & (int64_t, int64_t, c10::SymIntArrayRef, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(int64_t low, int64_t high, c10::SymIntArrayRef size, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, c10::SymIntArrayRef size, at::Tensor & out);
};

struct TORCH_API randint_low_generator_out {
  using schema = at::Tensor & (int64_t, int64_t, c10::SymIntArrayRef, c10::optional<at::Generator>, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_generator_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
};

}} // namespace at::_ops
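
// Illustrative sketch, not part of the generated output: the *_out structs
// write into a caller-provided tensor and return a reference to it, while the
// redispatch variants additionally take an explicit c10::DispatchKeySet so
// internal callers can continue dispatch below the current key. Assuming a
// suitably sized `out` tensor for the example:
//
//   std::vector<c10::SymInt> size = {2, 3};
//   at::Tensor out = at::empty({2, 3}, at::kLong);
//   at::_ops::randint_out::call(/*high=*/10, size, out);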