// File: miopen_rnn_native.h
  1. #pragma once
  2. // @generated by torchgen/gen.py from NativeFunction.h
  3. #include <c10/core/Scalar.h>
  4. #include <c10/core/Storage.h>
  5. #include <c10/core/TensorOptions.h>
  6. #include <c10/util/Deprecated.h>
  7. #include <c10/util/Optional.h>
  8. #include <c10/core/QScheme.h>
  9. #include <ATen/core/Reduction.h>
  10. #include <ATen/core/Tensor.h>
  11. #include <tuple>
  12. #include <vector>
  13. namespace at {
  14. namespace native {
  15. TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4);
  16. TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state);
  17. } // namespace native
  18. } // namespace at