hamming_window.h

#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>

#include <ATen/ops/hamming_window_ops.h>

namespace at {

// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={}) {
    return at::_ops::hamming_window::call(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}

// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor hamming_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return at::_ops::hamming_window::call(window_length, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options={}) {
    return at::_ops::hamming_window_periodic::call(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}

// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor hamming_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return at::_ops::hamming_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}) {
    return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}

// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}) {
    return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}

// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
}
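
// Usage sketch (illustrative only, not produced by torchgen): the factory
// overloads above allocate and return a new Hamming window tensor; the
// TensorOptions overload is the usual entry point, while the ScalarType/
// Layout/Device/pin_memory overload mirrors the raw aten schema. Assuming
// ATen is available, a typical call might look like:
//
//   // 400-point periodic Hamming window in float32 on the default device
//   at::Tensor w = at::hamming_window(400, /*periodic=*/true,
//                                     at::TensorOptions().dtype(at::kFloat));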

// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length) {
    return at::_ops::hamming_window_out::call(window_length, out);
}

// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out) {
    return at::_ops::hamming_window_out::call(window_length, out);
}

// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
    return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out);
}

// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
    return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out);
}

// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha) {
    return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out);
}

// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
    return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out);
}

// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta) {
    return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out);
}

// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
    return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out);
}
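
// Usage sketch (illustrative only, not produced by torchgen): the *_out and
// *_outf overloads above write the window into an existing tensor instead of
// allocating a new one; the two spellings forward to the same operator and
// differ only in where the `out` argument appears. Assuming ATen is
// available, a call might look like:
//
//   // fill a preallocated float tensor with a 256-point periodic window
//   at::Tensor out = at::empty({256}, at::kFloat);
//   at::hamming_window_out(out, 256, /*periodic=*/true);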

} // namespace at