
  1. r"""Quantized Modules
  2. Note::
  3. The `torch.nn.quantized` namespace is in the process of being deprecated.
  4. Please, use `torch.ao.nn.quantized` instead.
  5. """
from torch.ao.nn.quantized.modules.activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid, Softmax, MultiheadAttention, PReLU
from torch.ao.nn.quantized.modules.batchnorm import BatchNorm2d, BatchNorm3d
from torch.ao.nn.quantized.modules.conv import Conv1d, Conv2d, Conv3d
from torch.ao.nn.quantized.modules.conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from torch.ao.nn.quantized.modules.dropout import Dropout
from torch.ao.nn.quantized.modules.embedding_ops import Embedding, EmbeddingBag
from torch.ao.nn.quantized.modules.functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
from torch.ao.nn.quantized.modules.linear import Linear
from torch.ao.nn.quantized.modules.normalization import LayerNorm, GroupNorm, InstanceNorm1d, InstanceNorm2d, InstanceNorm3d
from torch.ao.nn.quantized.modules.rnn import LSTM
from torch.ao.nn.quantized.modules import MaxPool2d
from torch.ao.nn.quantized.modules import Quantize, DeQuantize

# The following imports are needed in case the user decides
# to import the files directly,
# e.g. `from torch.nn.quantized.modules.conv import ...`.
# No need to add them to the `__all__`.
from torch.ao.nn.quantized.modules import activation
from torch.ao.nn.quantized.modules import batchnorm
from torch.ao.nn.quantized.modules import conv
from torch.ao.nn.quantized.modules import dropout
from torch.ao.nn.quantized.modules import embedding_ops
from torch.ao.nn.quantized.modules import functional_modules
from torch.ao.nn.quantized.modules import linear
from torch.ao.nn.quantized.modules import normalization
from torch.ao.nn.quantized.modules import rnn
from torch.ao.nn.quantized.modules import utils

__all__ = [
    'BatchNorm2d',
    'BatchNorm3d',
    'Conv1d',
    'Conv2d',
    'Conv3d',
    'ConvTranspose1d',
    'ConvTranspose2d',
    'ConvTranspose3d',
    'DeQuantize',
    'ELU',
    'Embedding',
    'EmbeddingBag',
    'GroupNorm',
    'Hardswish',
    'InstanceNorm1d',
    'InstanceNorm2d',
    'InstanceNorm3d',
    'LayerNorm',
    'LeakyReLU',
    'Linear',
    'LSTM',
    'MultiheadAttention',
    'Quantize',
    'ReLU6',
    'Sigmoid',
    'Softmax',
    'Dropout',
    'PReLU',
    # Wrapper modules
    'FloatFunctional',
    'FXFloatFunctional',
    'QFunctional',
]