# _nn.pyi

from torch import Tensor, memory_format
from typing import Callable, Optional, List, overload, Tuple
from torch.types import _bool, _dtype, _device

# Defined in tools/autograd/templates/python_nn_functions.cpp
fractional_max_pool2d: Callable
fractional_max_pool3d: Callable
max_pool1d: Callable
max_pool2d: Callable
max_pool3d: Callable
adaptive_max_pool1d: Callable
adaptive_max_pool2d: Callable
adaptive_max_pool3d: Callable
avg_pool2d: Callable
avg_pool3d: Callable
hardtanh_: Callable
elu_: Callable
leaky_relu_: Callable
logsigmoid: Callable
softplus: Callable
softshrink: Callable
one_hot: Callable
scaled_dot_product_attention: Callable
hardtanh: Callable
leaky_relu: Callable
hardsigmoid: Callable
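# Note: the names above are typed only as Callable; their full signatures are
# handled by the generated C++ argument parser rather than by this stub. Most of
# them are what torch.nn.functional dispatches to under the hood (e.g. F.one_hot,
# F.softplus).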
# Defined in aten/src/ATen/native/mkldnn/Linear.cpp
def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ...
# Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
def mkldnn_reorder_conv2d_weight(self: Tensor, padding: List, stride: List, dilation: List, groups: int) -> Tensor: ...
def mkldnn_reorder_conv3d_weight(self: Tensor, padding: List, stride: List, dilation: List, groups: int) -> Tensor: ...
# Defined in aten/src/ATen/native/mkldnn/Prelu.cpp
def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...
# Defined at tools/autograd/templates/python_nn_functions.cpp
@overload
def _parse_to(device: _device, dtype: _dtype, non_blocking: _bool, copy: _bool, *,
              memory_format: memory_format) -> Tuple[_device, _dtype, _bool, memory_format]: ...
@overload
def _parse_to(dtype: _dtype, non_blocking: _bool, copy: _bool, *,
              memory_format: memory_format) -> Tuple[_device, _dtype, _bool, memory_format]: ...
@overload
def _parse_to(tensor: Tensor, non_blocking: _bool, copy: _bool, *,
              memory_format: memory_format) -> Tuple[_device, _dtype, _bool, memory_format]: ...
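# Illustrative usage (this helper normalizes .to() arguments), e.g. in torch.nn.Module.to:
#   device, dtype, non_blocking, convert_to_format = _parse_to(*args, **kwargs)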
# Defined in aten/src/ATen/native/PadSequence.cpp
def pad_sequence(sequences: List[Tensor], batch_first: bool = False,
                 padding_value: float = ...) -> Tensor: ...
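# Backs torch.nn.utils.rnn.pad_sequence; a minimal sketch:
#   pad_sequence([torch.ones(3), torch.ones(2)], batch_first=True)  # -> zero-padded (2, 3) tensor

# flatten_dense_tensors concatenates its inputs into a single contiguous 1-D buffer;
# unflatten_dense_tensors splits such a buffer back into views with the shapes of
# `tensors` (these appear to back torch._utils._flatten_dense_tensors and its inverse).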
def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ...
def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ...