batchnorm.py

import torch
import torch.ao.nn.intrinsic as nni

__all__ = [
    "BatchNorm2d",
    "BatchNorm3d"
]


class _BatchNorm(torch.nn.modules.batchnorm._BatchNorm):
    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(num_features, eps, momentum, True, True, **factory_kwargs)
        self.register_buffer('scale', torch.tensor(1.0, **factory_kwargs))
        self.register_buffer('zero_point', torch.tensor(0, **factory_kwargs))

    @staticmethod
    def from_float(cls, mod):
        # Convert an observed float module (or a fused BN+ReLU intrinsic module)
        # into its quantized counterpart, using the attached observer to compute
        # the output scale and zero point.
        activation_post_process = mod.activation_post_process
        if type(mod) == cls._NNI_BN_RELU_MODULE:
            mod = mod[0]
        scale, zero_point = activation_post_process.calculate_qparams()
        new_mod = cls(mod.num_features, mod.eps)
        new_mod.weight = mod.weight
        new_mod.bias = mod.bias
        new_mod.running_mean = mod.running_mean
        new_mod.running_var = mod.running_var
        new_mod.scale = scale
        new_mod.zero_point = zero_point
        return new_mod

    @classmethod
    def from_reference(cls, bn, output_scale, output_zero_point):
        # Convert a reference batch norm module into the quantized module,
        # with the output quantization parameters supplied explicitly.
        qbn = cls(
            bn.num_features,
            bn.eps,
            bn.momentum,
            device=bn.weight.device,
            dtype=bn.weight.dtype
        )
        qbn.weight = bn.weight
        qbn.bias = bn.bias
        qbn.running_mean = bn.running_mean
        qbn.running_var = bn.running_var
        qbn.scale = output_scale
        qbn.zero_point = output_zero_point
        return qbn

class BatchNorm2d(_BatchNorm):
    r"""This is the quantized version of :class:`~torch.nn.BatchNorm2d`.
    """

    _NNI_BN_RELU_MODULE = nni.BNReLU2d

    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(num_features, eps, momentum, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedBatchNorm2d'

    def _check_input_dim(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # disabling this since this is not symbolically traceable
        # self._check_input_dim(input)
        return torch.ops.quantized.batch_norm2d(
            input, self.weight, self.bias, self.running_mean,
            self.running_var, self.eps, self.scale, self.zero_point)

    @classmethod
    def from_float(cls, mod):
        return _BatchNorm.from_float(cls, mod)

class BatchNorm3d(_BatchNorm):
    r"""This is the quantized version of :class:`~torch.nn.BatchNorm3d`.
    """

    _NNI_BN_RELU_MODULE = nni.BNReLU3d

    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(num_features, eps, momentum, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedBatchNorm3d'

    def _check_input_dim(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # disabling this since this is not symbolically traceable
        # self._check_input_dim(input)
        return torch.ops.quantized.batch_norm3d(
            input, self.weight, self.bias, self.running_mean,
            self.running_var, self.eps, self.scale, self.zero_point)

    @classmethod
    def from_float(cls, mod):
        return _BatchNorm.from_float(cls, mod)
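

# ---------------------------------------------------------------------------
# Usage sketch (not part of the upstream module): a minimal illustration of how
# an observed float BatchNorm2d can be converted with ``from_float`` and then
# run on a quantized tensor. Attaching ``activation_post_process`` by hand with
# a MinMaxObserver is an assumption standing in for what the eager-mode
# prepare/calibrate workflow normally does; treat this as illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.ao.quantization import MinMaxObserver

    float_bn = torch.nn.BatchNorm2d(4).eval()

    # Attach and calibrate an observer on the float module's output
    # (normally inserted by the quantization prepare step).
    float_bn.activation_post_process = MinMaxObserver()
    calib = torch.randn(2, 4, 8, 8)
    float_bn.activation_post_process(float_bn(calib))

    # Convert the observed float module into the quantized module defined above.
    qbn = BatchNorm2d.from_float(float_bn)

    # The quantized module consumes and produces quantized tensors.
    qx = torch.quantize_per_tensor(torch.randn(2, 4, 8, 8), scale=0.1,
                                   zero_point=0, dtype=torch.quint8)
    qy = qbn(qx)
    print(qy.shape, qy.dtype)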