# observer.py
  1. """
  2. This module implements observers which are used to collect statistics about
  3. the values observed during calibration (PTQ) or training (QAT).
  4. """
  5. import re
  6. import warnings
  7. from abc import ABCMeta, abstractmethod
  8. from collections import OrderedDict
  9. from functools import partial
  10. from typing import Any, List, Tuple, Optional, Dict
  11. import torch
  12. import torch.nn as nn
  13. from torch.ao.quantization.utils import (
  14. check_min_max_valid, calculate_qmin_qmax, is_per_tensor, is_per_channel, validate_qmin_qmax)
  15. __all__ = [
  16. "default_affine_fixed_qparams_observer",
  17. "default_debug_observer",
  18. "default_dynamic_quant_observer",
  19. "default_fixed_qparams_range_0to1_observer",
  20. "default_fixed_qparams_range_neg1to1_observer",
  21. "default_float_qparams_observer",
  22. "default_float_qparams_observer_4bit",
  23. "default_histogram_observer",
  24. "default_observer",
  25. "default_per_channel_weight_observer",
  26. "default_placeholder_observer",
  27. "default_reuse_input_observer",
  28. "default_symmetric_fixed_qparams_observer",
  29. "default_weight_observer",
  30. "get_observer_state_dict",
  31. "load_observer_state_dict",
  32. "per_channel_weight_observer_range_neg_127_to_127",
  33. "weight_observer_range_neg_127_to_127",
  34. "FixedQParamsObserver",
  35. "HistogramObserver",
  36. "MinMaxObserver",
  37. "MovingAverageMinMaxObserver",
  38. "MovingAveragePerChannelMinMaxObserver",
  39. "NoopObserver",
  40. "ObserverBase",
  41. "PerChannelMinMaxObserver",
  42. "PlaceholderObserver",
  43. "RecordingObserver",
  44. "ReuseInputObserver",
  45. "UniformQuantizationObserverBase",
  46. ]
  47. class _PartialWrapper:
  48. def __init__(self, p):
  49. self.p = p
  50. self.callable_args = {}
  51. def __call__(self, *args, **keywords):
52. # call each arg in callable_args and add the result to keywords, then run with keywords
53. # skip if arg_name is already in keywords so it's possible to overwrite it
  54. for arg_name in self.callable_args:
  55. if arg_name not in keywords:
  56. keywords = {**keywords, **{arg_name: self.callable_args[arg_name]()}}
  57. return self.p(*args, **keywords)
  58. def __repr__(self):
  59. return self.p.__repr__() + self.callable_args.__repr__()
  60. def with_args(self, **kwargs):
  61. return _with_args(self, **kwargs)
  62. def with_callable_args(self, **kwargs):
  63. result = _PartialWrapper(p=self.p)
  64. result.callable_args = {**self.callable_args, **kwargs}
  65. return result
  66. def _with_args(cls_or_self, **kwargs):
  67. r"""Wrapper that allows creation of class factories.
  68. This can be useful when there is a need to create classes with the same
  69. constructor arguments, but different instances. Can be used in conjunction with
70. _with_callable_args
  71. Example::
  72. >>> # xdoctest: +SKIP("Undefined vars")
  73. >>> Foo.with_args = classmethod(_with_args)
  74. >>> foo_builder = Foo.with_args(a=3, b=4).with_args(answer=42)
  75. >>> foo_instance1 = foo_builder()
  76. >>> foo_instance2 = foo_builder()
  77. >>> id(foo_instance1) == id(foo_instance2)
  78. False
  79. """
  80. r = _PartialWrapper(partial(cls_or_self, **kwargs))
  81. return r
  82. def _with_callable_args(cls_or_self, **kwargs):
83. r"""Wrapper that allows creation of class factories with arguments that need to be
  84. called at construction time.
  85. This can be useful when there is a need to create classes with the same
  86. constructor arguments, but different instances and those arguments should only
  87. be calculated at construction time. Can be used in conjunction with _with_args
  88. Example::
  89. >>> # xdoctest: +SKIP("Undefined vars")
  90. >>> Foo.with_callable_args = classmethod(_with_callable_args)
  91. >>> Foo.with_args = classmethod(_with_args)
  92. >>> foo_builder = Foo.with_callable_args(cur_time=get_time_func).with_args(name="dan")
  93. >>> foo_instance1 = foo_builder()
  94. >>> # wait 50
  95. >>> foo_instance2 = foo_builder()
  96. >>> id(foo_instance1.creation_time) == id(foo_instance2.creation_time)
  97. False
  98. """
  99. r = _PartialWrapper(partial(cls_or_self))
  100. return r.with_callable_args(**kwargs)
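# --- Illustrative sketch (not part of the upstream file) ---------------------
# How the ``with_args`` helper above is typically used: it freezes constructor
# keywords and returns a factory that builds identically configured, but
# distinct, observer instances. ``MinMaxObserver`` (defined later in this
# module) is used only as a convenient concrete observer; the ``_example_*``
# helper name is hypothetical.
def _example_observer_factories():
    factory = MinMaxObserver.with_args(dtype=torch.qint8,
                                       qscheme=torch.per_tensor_symmetric)
    obs_a, obs_b = factory(), factory()   # two separate observer instances
    assert obs_a is not obs_b and obs_a.dtype == obs_b.dtype == torch.qint8
    return obs_a, obs_b
# ------------------------------------------------------------------------------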
  101. ABC: Any = ABCMeta(str("ABC"), (object,), {}) # compatible with Python 2 *and* 3:
  102. class ObserverBase(ABC, nn.Module):
  103. r"""Base observer Module.
  104. Any observer implementation should derive from this class.
  105. Concrete observers should follow the same API. In forward, they will update
  106. the statistics of the observed Tensor. And they should provide a
  107. `calculate_qparams` function that computes the quantization parameters given
  108. the collected statistics.
  109. Args:
  110. dtype: dtype argument to the `quantize` node needed to implement the
  111. reference model spec.
  112. """
  113. def __init__(self, dtype):
  114. super().__init__()
  115. self.dtype = dtype
  116. @abstractmethod
  117. def forward(self, x):
  118. pass
  119. @abstractmethod
  120. def calculate_qparams(self, **kwargs):
  121. pass
  122. with_args = classmethod(_with_args)
  123. with_callable_args = classmethod(_with_callable_args)
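# --- Illustrative sketch (not part of the upstream file) ---------------------
# The contract described by ObserverBase: subclasses record statistics in
# ``forward`` and turn them into (scale, zero_point) in ``calculate_qparams``.
# A deliberately minimal, hypothetical subclass that always reports unit scale:
class _ExampleConstantObserver(ObserverBase):
    def __init__(self, dtype=torch.quint8):
        super().__init__(dtype=dtype)

    def forward(self, x):
        # Observers must return their input unchanged so they can be dropped
        # transparently into a float model during calibration.
        return x

    def calculate_qparams(self):
        return torch.tensor([1.0]), torch.tensor([0])
# ------------------------------------------------------------------------------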
  124. class UniformQuantizationObserverBase(ObserverBase):
  125. r"""Common base for all observers using uniform quantization to calculate
  126. scale and zero_point.
  127. Args:
  128. dtype: dtype argument to the `quantize` node needed to implement the
  129. reference model spec.
  130. qscheme: Quantization scheme to be used.
  131. reduce_range: Reduces the range of the quantized data type by 1 bit.
  132. This is sometimes required to avoid instruction overflow.
  133. quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
  134. quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
135. eps: Epsilon value for float32. Defaults to `torch.finfo(torch.float32).eps`.
  136. .. warning::
  137. :attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``.
  138. .. warning::
  139. :attr:`qscheme` can only take one of the following options:
  140. - ``torch.per_tensor_affine``
  141. - ``torch.per_tensor_symmetric``
  142. - ``torch.per_channel_affine``
  143. - ``torch.per_channel_symmetric``
  144. """
  145. # Note: the version is shared by all observer types
  146. #
  147. # Version 1/None
  148. # self
  149. #
  150. # Version 2 (base class only, does not include child class buffers)
  151. # self
  152. # |--- eps : Tensor
  153. #
  154. # Version 3
  155. # for HistogramObserver only, changed the shape of uninitialized
  156. # min_val and max_val buffers from torch.Size([0]) to torch.Size([])
  157. # for PerChannelObservers, changed the name of the buffers from min_vals
  158. # to min_val and from max_vals to max_val.
  159. _version = 3
  160. eps: torch.Tensor
  161. def __init__(
  162. self,
  163. dtype=torch.quint8,
  164. qscheme=torch.per_tensor_affine,
  165. reduce_range=False,
  166. quant_min=None,
  167. quant_max=None,
  168. factory_kwargs=None,
  169. eps=torch.finfo(torch.float32).eps,
  170. ) -> None:
  171. factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
  172. super().__init__(dtype=dtype)
  173. self.qscheme = qscheme
  174. if reduce_range:
  175. warnings.warn(
  176. "Please use quant_min and quant_max to specify the range for observers. \
  177. reduce_range will be deprecated in a future release of PyTorch."
  178. )
  179. self.reduce_range = reduce_range
  180. self.register_buffer(
  181. "eps", torch.tensor([eps], **factory_kwargs)
  182. )
  183. assert self.qscheme in (
  184. torch.per_tensor_affine,
  185. torch.per_tensor_symmetric,
  186. torch.per_channel_affine,
  187. torch.per_channel_symmetric,
  188. torch.per_channel_affine_float_qparams,
  189. ), "Default Observer only works for per_tensor_affine, \
  190. per_tensor_symmetric, per_channel_affine, \
191. per_channel_symmetric and per_channel_affine_float_qparams quantization schemes"
  192. assert self.dtype in (
  193. torch.qint8,
  194. torch.quint8,
  195. torch.quint4x2,
  196. torch.qint32,
197. ), "Default Observer only works for qint8, quint8, quint4x2 and qint32 data types"
  198. self.has_customized_qrange = (quant_min is not None) and (quant_max is not None)
  199. if self.has_customized_qrange:
  200. validate_qmin_qmax(quant_min, quant_max)
  201. self.quant_min, self.quant_max = \
  202. calculate_qmin_qmax(quant_min, quant_max, self.has_customized_qrange, self.dtype, self.reduce_range)
  203. def _load_from_state_dict(
  204. self,
  205. state_dict,
  206. prefix,
  207. local_metadata,
  208. strict,
  209. missing_keys,
  210. unexpected_keys,
  211. error_msgs,
  212. ):
  213. version = local_metadata.get("version", None)
  214. if version is None or version == 1:
  215. # eps was moved to a buffer in version 2
  216. eps = torch.tensor([torch.finfo(torch.float32).eps])
  217. state_dict[prefix + "eps"] = eps
  218. super()._load_from_state_dict(
  219. state_dict,
  220. prefix,
  221. local_metadata,
  222. strict,
  223. missing_keys,
  224. unexpected_keys,
  225. error_msgs,
  226. )
  227. @torch.jit.export
  228. def _validate_qmin_qmax(self, quant_min: int, quant_max: int) -> None:
  229. r"""Validates that the user-specified quantization range is properly initialized
  230. and within the given bound supported by the observer dtype.
  231. To accommodate lower-bit quantization with respect to the existing torch.qint8 and
  232. torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing
  233. in a tuple of initial qmin and qmax values. One use case is these customized qmin and qmax
  234. values are used to calculate static estimates of the scale and zero point for aggressive lower-bit
  235. fake quantization. These estimates are compared against parameters learned through backpropagation.
236. The related literature on learning scale and zero point via backpropagation is as follows:
  237. Learned Step Size Quantization: https://openreview.net/pdf?id=rkgO66VKDS
  238. Trained Quantization Thresholds: https://arxiv.org/pdf/1903.08066.pdf
  239. """
  240. # The variable names are prefixed with "initial" because their values (qmin and qmax) might be adjusted
  241. # based on whether quantization range is reduced and the datatype (signed/unsigned) used by the observer.
  242. assert (
  243. quant_min <= 0 <= quant_max
244. ), "User-specified quantization range must include 0."
  245. assert (
  246. quant_min < quant_max
  247. ), "qmin must be strictly less than qmax for user-specified quantization range."
  248. @torch.jit.export
  249. def _calculate_qparams(
  250. self, min_val: torch.Tensor, max_val: torch.Tensor
  251. ) -> Tuple[torch.Tensor, torch.Tensor]:
  252. r"""Calculates the quantization parameters, given min and max
  253. value tensors. Works for both per tensor and per channel cases
  254. Args:
  255. min_val: Minimum values per channel
  256. max_val: Maximum values per channel
  257. Returns:
  258. scales: Scales tensor of shape (#channels,)
  259. zero_points: Zero points tensor of shape (#channels,)
  260. """
261. # Functionally equivalent to 'determine_qparams' in utils.py. However, observers must be torchscriptable, and qscheme
262. # apparently cannot be passed as a parameter to torchscript functions. This makes refactoring the observer
263. # to use that utility painful, so for now we opt to duplicate the logic here, since this code
264. # seems unlikely to change (last update over 1 year ago); once torchscript is fully deprecated we can refactor.
  265. # TODO(jakeszwe, jerryzh168)
  266. if not check_min_max_valid(min_val, max_val):
  267. return torch.tensor([1.0], device=min_val.device.type), torch.tensor([0], device=min_val.device.type)
  268. quant_min, quant_max = self.quant_min, self.quant_max
  269. min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
  270. max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
  271. device = min_val_neg.device
  272. scale = torch.ones(min_val_neg.size(), dtype=torch.float32, device=device)
  273. zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)
  274. if (
  275. self.qscheme == torch.per_tensor_symmetric
  276. or self.qscheme == torch.per_channel_symmetric
  277. ):
  278. max_val_pos = torch.max(-min_val_neg, max_val_pos)
  279. scale = max_val_pos / (float(quant_max - quant_min) / 2)
  280. scale = torch.max(scale, self.eps)
  281. if self.dtype == torch.quint8:
  282. if self.has_customized_qrange:
  283. # When customized quantization range is used, down-rounded midpoint of the range is chosen.
  284. zero_point = zero_point.new_full(
  285. zero_point.size(), (quant_min + quant_max) // 2
  286. )
  287. else:
  288. zero_point = zero_point.new_full(zero_point.size(), 128)
  289. elif self.qscheme == torch.per_channel_affine_float_qparams:
  290. scale = (max_val - min_val) / float(quant_max - quant_min)
  291. scale = torch.where(scale > self.eps, scale, torch.ones_like(scale))
292. # We use the quantize function
293. # Xq = Round(Xf * inv_scale + zero_point),
294. # so setting zero_point to (-1 * min * inv_scale) we get
295. # Xq = Round((Xf - min) * inv_scale)
  296. zero_point = -1 * min_val / scale
  297. else:
  298. scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)
  299. scale = torch.max(scale, self.eps)
  300. zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)
  301. zero_point = torch.clamp(zero_point, quant_min, quant_max)
  302. # For scalar values, cast them to Tensors of size 1 to keep the shape
  303. # consistent with default values in FakeQuantize.
  304. if len(scale.shape) == 0:
  305. # TODO: switch to scale.item() after adding JIT support
  306. scale = torch.tensor([float(scale)], dtype=scale.dtype, device=device)
  307. if len(zero_point.shape) == 0:
  308. # TODO: switch to zero_point.item() after adding JIT support
  309. zero_point = torch.tensor(
  310. [int(zero_point)], dtype=zero_point.dtype, device=device
  311. )
  312. if self.qscheme == torch.per_channel_affine_float_qparams:
  313. zero_point = torch.tensor(
  314. [float(zero_point)], dtype=zero_point.dtype, device=device
  315. )
  316. return scale, zero_point
  317. @torch.jit.export
  318. def reset_min_max_vals(self):
  319. raise NotImplementedError("Cannot reset min/max values in the given observer.")
  320. # Originally, this class was called `_ObserverBase`. Keeping the old name around
  321. # for backwards compatibility.
  322. # TODO(after v1.13): delete this
  323. _ObserverBase = UniformQuantizationObserverBase
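# --- Illustrative sketch (not part of the upstream file) ---------------------
# For the default affine, per-tensor case, ``_calculate_qparams`` above reduces
# to the textbook formulas scale = (max - min) / (qmax - qmin) and
# zero_point = clamp(qmin - round(min / scale), qmin, qmax). A hand-worked
# check of those formulas, assuming the default quint8 range 0..255:
def _example_affine_qparams(min_val: float = -1.0, max_val: float = 3.0):
    qmin, qmax = 0, 255
    min_neg, max_pos = min(min_val, 0.0), max(max_val, 0.0)   # range must include 0
    scale = (max_pos - min_neg) / float(qmax - qmin)          # (3 - (-1)) / 255
    zero_point = int(min(max(qmin - round(min_neg / scale), qmin), qmax))
    return scale, zero_point                                  # approx. 0.0157, 64
# ------------------------------------------------------------------------------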
  324. class MinMaxObserver(UniformQuantizationObserverBase):
  325. r"""Observer module for computing the quantization parameters based on the
  326. running min and max values.
  327. This observer uses the tensor min/max statistics to compute the quantization
  328. parameters. The module records the running minimum and maximum of incoming
  329. tensors, and uses this statistic to compute the quantization parameters.
  330. Args:
  331. dtype: dtype argument to the `quantize` node needed to implement the
  332. reference model spec.
  333. qscheme: Quantization scheme to be used
  334. reduce_range: Reduces the range of the quantized data type by 1 bit
  335. quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
  336. quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
337. eps: Epsilon value for float32. Defaults to `torch.finfo(torch.float32).eps`.
  338. Given running min/max as :math:`x_\text{min}` and :math:`x_\text{max}`,
  339. scale :math:`s` and zero point :math:`z` are computed as:
  340. The running minimum/maximum :math:`x_\text{min/max}` is computed as:
  341. .. math::
  342. \begin{array}{ll}
  343. x_\text{min} &= \begin{cases}
  344. \min(X) & \text{if~}x_\text{min} = \text{None} \\
  345. \min\left(x_\text{min}, \min(X)\right) & \text{otherwise}
  346. \end{cases}\\
  347. x_\text{max} &= \begin{cases}
  348. \max(X) & \text{if~}x_\text{max} = \text{None} \\
  349. \max\left(x_\text{max}, \max(X)\right) & \text{otherwise}
  350. \end{cases}\\
  351. \end{array}
  352. where :math:`X` is the observed tensor.
  353. The scale :math:`s` and zero point :math:`z` are then computed as:
  354. .. math::
  355. \begin{aligned}
  356. \text{if Symmetric:}&\\
  357. &s = 2 \max(|x_\text{min}|, x_\text{max}) /
  358. \left( Q_\text{max} - Q_\text{min} \right) \\
  359. &z = \begin{cases}
  360. 0 & \text{if dtype is qint8} \\
  361. 128 & \text{otherwise}
  362. \end{cases}\\
  363. \text{Otherwise:}&\\
  364. &s = \left( x_\text{max} - x_\text{min} \right ) /
  365. \left( Q_\text{max} - Q_\text{min} \right ) \\
  366. &z = Q_\text{min} - \text{round}(x_\text{min} / s)
  367. \end{aligned}
  368. where :math:`Q_\text{min}` and :math:`Q_\text{max}` are the minimum and
  369. maximum of the quantized data type.
  370. .. warning:: :attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``.
371. .. note:: If the running minimum equals the running maximum, the scale
  372. and zero_point are set to 1.0 and 0.
  373. """
  374. min_val: torch.Tensor
  375. max_val: torch.Tensor
  376. def __init__(
  377. self,
  378. dtype=torch.quint8,
  379. qscheme=torch.per_tensor_affine,
  380. reduce_range=False,
  381. quant_min=None,
  382. quant_max=None,
  383. factory_kwargs=None,
  384. eps=torch.finfo(torch.float32).eps,
  385. ) -> None:
  386. if not is_per_tensor(qscheme):
  387. raise NotImplementedError(
388. "MinMaxObserver's qscheme only supports torch.per_tensor_symmetric \
  389. and torch.per_tensor_affine."
  390. )
  391. # For x86 quantized kernels, we need to ensure that the vpmaddubsw
  392. # instruction does not overflow. We allow for a reduce_range argument to
  393. # observers that reduces the quantized range to (0,127) or (-64, 63).
  394. # For more details see aten/src/ATen/native/quantized/cpu/qconv.cpp
  395. # This is not an optimal choice for non x86 backends as it loses a bit
  396. # of precision for activations.
  397. super().__init__(
  398. dtype=dtype,
  399. qscheme=qscheme,
  400. reduce_range=reduce_range,
  401. quant_min=quant_min,
  402. quant_max=quant_max,
  403. factory_kwargs=factory_kwargs,
  404. eps=eps,
  405. )
  406. factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
  407. self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
  408. self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
  409. if (
  410. self.qscheme == torch.per_tensor_symmetric
  411. and self.reduce_range
  412. and self.dtype == torch.quint8
  413. ):
  414. raise NotImplementedError(
  415. "Cannot reduce range for symmetric \
  416. quantization for quint8"
  417. )
  418. def forward(self, x_orig):
  419. r"""Records the running minimum and maximum of ``x``."""
  420. if x_orig.numel() == 0:
  421. return x_orig
  422. x = x_orig.detach() # avoid keeping autograd tape
  423. x = x.to(self.min_val.dtype)
  424. min_val_cur, max_val_cur = torch.aminmax(x)
  425. min_val = torch.min(min_val_cur, self.min_val)
  426. max_val = torch.max(max_val_cur, self.max_val)
  427. self.min_val.copy_(min_val)
  428. self.max_val.copy_(max_val)
  429. return x_orig
  430. @torch.jit.export
  431. def calculate_qparams(self):
  432. r"""Calculates the quantization parameters."""
  433. return self._calculate_qparams(self.min_val, self.max_val)
  434. @torch.jit.export
  435. def extra_repr(self):
  436. return "min_val={}, max_val={}".format(self.min_val, self.max_val)
  437. @torch.jit.export
  438. def reset_min_max_vals(self):
  439. """Resets the min/max values."""
  440. self.min_val.copy_(torch.tensor(float("inf")))
  441. self.max_val.copy_(torch.tensor(float("-inf")))
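# --- Illustrative sketch (not part of the upstream file) ---------------------
# A typical post-training calibration flow with MinMaxObserver: feed a few
# batches through the observer, then read off the quantization parameters.
# The random data below is only a stand-in for real calibration inputs.
def _example_minmax_calibration():
    obs = MinMaxObserver(dtype=torch.quint8, qscheme=torch.per_tensor_affine)
    for _ in range(4):
        obs(torch.randn(8, 16))            # forward() updates min_val/max_val
    scale, zero_point = obs.calculate_qparams()
    return scale, zero_point               # 1-element tensors (per-tensor case)
# ------------------------------------------------------------------------------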
  442. class MovingAverageMinMaxObserver(MinMaxObserver):
  443. r"""Observer module for computing the quantization parameters based on the
  444. moving average of the min and max values.
  445. This observer computes the quantization parameters based on the moving
  446. averages of minimums and maximums of the incoming tensors. The module
  447. records the average minimum and maximum of incoming tensors, and uses this
  448. statistic to compute the quantization parameters.
  449. Args:
  450. averaging_constant: Averaging constant for min/max.
  451. dtype: dtype argument to the `quantize` node needed to implement the
  452. reference model spec.
  453. qscheme: Quantization scheme to be used
  454. reduce_range: Reduces the range of the quantized data type by 1 bit
  455. quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
  456. quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
457. eps: Epsilon value for float32. Defaults to `torch.finfo(torch.float32).eps`.
  458. The moving average min/max is computed as follows
  459. .. math::
  460. \begin{array}{ll}
  461. x_\text{min} = \begin{cases}
  462. \min(X) & \text{if~}x_\text{min} = \text{None} \\
  463. (1 - c) x_\text{min} + c \min(X) & \text{otherwise}
  464. \end{cases}\\
  465. x_\text{max} = \begin{cases}
  466. \max(X) & \text{if~}x_\text{max} = \text{None} \\
  467. (1 - c) x_\text{max} + c \max(X) & \text{otherwise}
  468. \end{cases}\\
  469. \end{array}
  470. where :math:`x_\text{min/max}` is the running average min/max, :math:`X` is
471. the incoming tensor, and :math:`c` is the ``averaging_constant``.
  472. The scale and zero point are then computed as in
  473. :class:`~torch.ao.quantization.observer.MinMaxObserver`.
474. .. note:: Only works with the ``torch.per_tensor_affine`` and ``torch.per_tensor_symmetric`` quantization schemes.
475. .. note:: If the running minimum equals the running maximum, the scale
  476. and zero_point are set to 1.0 and 0.
  477. """
  478. def __init__(
  479. self,
  480. averaging_constant=0.01,
  481. dtype=torch.quint8,
  482. qscheme=torch.per_tensor_affine,
  483. reduce_range=False,
  484. quant_min=None,
  485. quant_max=None,
  486. eps=torch.finfo(torch.float32).eps,
  487. **kwargs
  488. ) -> None:
  489. if not is_per_tensor(qscheme):
  490. raise NotImplementedError(
491. "MovingAverageMinMaxObserver's qscheme only supports \
  492. torch.per_tensor_symmetric and torch.per_tensor_affine."
  493. )
  494. self.averaging_constant = averaging_constant
  495. super().__init__(
  496. dtype=dtype,
  497. qscheme=qscheme,
  498. reduce_range=reduce_range,
  499. quant_min=quant_min,
  500. quant_max=quant_max,
  501. eps=eps,
  502. **kwargs
  503. )
  504. def forward(self, x_orig):
  505. if x_orig.numel() == 0:
  506. return x_orig
  507. x = x_orig.detach() # avoid keeping autograd tape
  508. x = x.to(self.min_val.dtype)
  509. min_val = self.min_val
  510. max_val = self.max_val
  511. if min_val == float("inf") and max_val == float("-inf"):
  512. min_val, max_val = torch.aminmax(x)
  513. else:
  514. min_val_cur, max_val_cur = torch.aminmax(x)
  515. min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
  516. max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
  517. self.min_val.copy_(min_val)
  518. self.max_val.copy_(max_val)
  519. return x_orig
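# --- Illustrative sketch (not part of the upstream file) ---------------------
# With MovingAverageMinMaxObserver the recorded range tracks an exponential
# moving average, x_min <- x_min + c * (min(X) - x_min), so a single outlier
# batch shifts the range only by a fraction ``averaging_constant`` of the gap.
def _example_moving_average_update():
    obs = MovingAverageMinMaxObserver(averaging_constant=0.01)
    obs(torch.zeros(10))                   # first batch: min_val = max_val = 0
    obs(100 * torch.ones(10))              # outlier batch
    # max_val has moved only ~1% of the way towards 100 (i.e. to about 1.0).
    return obs.min_val, obs.max_val
# ------------------------------------------------------------------------------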
  520. class PerChannelMinMaxObserver(UniformQuantizationObserverBase):
  521. r"""Observer module for computing the quantization parameters based on the
  522. running per channel min and max values.
  523. This observer uses the tensor min/max statistics to compute the per channel
  524. quantization parameters. The module records the running minimum and maximum
  525. of incoming tensors, and uses this statistic to compute the quantization
  526. parameters.
  527. Args:
  528. ch_axis: Channel axis
  529. dtype: dtype argument to the `quantize` node needed to implement the
  530. reference model spec.
  531. qscheme: Quantization scheme to be used
  532. reduce_range: Reduces the range of the quantized data type by 1 bit
  533. quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
  534. quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
535. eps: Epsilon value for float32. Defaults to `torch.finfo(torch.float32).eps`.
  536. The quantization parameters are computed the same way as in
  537. :class:`~torch.ao.quantization.observer.MinMaxObserver`, with the difference
  538. that the running min/max values are stored per channel.
  539. Scales and zero points are thus computed per channel as well.
540. .. note:: If the running minimum equals the running maximum, the scales
  541. and zero_points are set to 1.0 and 0.
  542. """
  543. min_val: torch.Tensor
  544. max_val: torch.Tensor
  545. def __init__(
  546. self,
  547. ch_axis=0,
  548. dtype=torch.quint8,
  549. qscheme=torch.per_channel_affine,
  550. reduce_range=False,
  551. quant_min=None,
  552. quant_max=None,
  553. factory_kwargs=None,
  554. eps=torch.finfo(torch.float32).eps,
  555. ) -> None:
  556. if not is_per_channel(qscheme):
  557. raise NotImplementedError(
558. "PerChannelMinMaxObserver's qscheme only supports \
  559. torch.per_channel_symmetric, torch.per_channel_affine and torch.per_channel_affine_float_qparams."
  560. )
  561. super().__init__(
  562. dtype=dtype,
  563. qscheme=qscheme,
  564. reduce_range=reduce_range,
  565. quant_min=quant_min,
  566. quant_max=quant_max,
  567. factory_kwargs=factory_kwargs,
  568. eps=eps,
  569. )
  570. factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
  571. self.ch_axis = ch_axis
  572. self.register_buffer("min_val", torch.tensor([], **factory_kwargs))
  573. self.register_buffer("max_val", torch.tensor([], **factory_kwargs))
  574. if (
  575. self.qscheme == torch.per_channel_symmetric
  576. and self.reduce_range
  577. and self.dtype == torch.quint8
  578. ):
  579. raise NotImplementedError(
  580. "Cannot reduce range for symmetric quantization for quint8"
  581. )
  582. def forward(self, x_orig):
  583. return self._forward(x_orig)
  584. def _forward(self, x_orig):
  585. if x_orig.numel() == 0:
  586. return x_orig
  587. x = x_orig.detach() # avoid keeping autograd tape
  588. min_val = self.min_val
  589. max_val = self.max_val
  590. x_dim = x.size()
  591. new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
  592. new_axis_list[self.ch_axis] = 0
  593. new_axis_list[0] = self.ch_axis
  594. y = x.permute(new_axis_list)
  595. # Need to match dtype of min/max because the updates to buffers
  596. # are done in place and types need to match for comparisons
  597. y = y.to(self.min_val.dtype)
  598. y = torch.flatten(y, start_dim=1)
  599. if min_val.numel() == 0 or max_val.numel() == 0:
  600. min_val, max_val = torch.aminmax(y, dim=1)
  601. else:
  602. min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
  603. min_val = torch.min(min_val_cur, min_val)
  604. max_val = torch.max(max_val_cur, max_val)
  605. self.min_val.resize_(min_val.shape)
  606. self.max_val.resize_(max_val.shape)
  607. self.min_val.copy_(min_val)
  608. self.max_val.copy_(max_val)
  609. return x_orig
  610. @torch.jit.export
  611. def calculate_qparams(self):
  612. return self._calculate_qparams(self.min_val, self.max_val)
  613. def extra_repr(self):
  614. return "min_val={}, max_val={}".format(self.min_val, self.max_val)
  615. def _load_from_state_dict(
  616. self,
  617. state_dict: Dict[str, Any],
  618. prefix: str,
  619. local_metadata: Dict[str, torch.Tensor],
  620. strict: bool,
  621. missing_keys: List[str],
  622. unexpected_keys: List[str],
  623. error_msgs: List[str],
  624. ):
  625. version = local_metadata.get("version", None)
  626. if version is None or version < 3:
  627. local_state = ["min_vals", "max_vals"]
  628. expected_min_name = "min_vals"
  629. expected_max_name = "max_vals"
  630. else:
  631. local_state = ["min_val", "max_val"]
  632. expected_min_name = "min_val"
  633. expected_max_name = "max_val"
  634. for name in local_state:
  635. key = prefix + name
  636. if key in state_dict:
  637. val = state_dict[key]
  638. # Custom handling to allow loading min_val or max_val
  639. # of size N into uninitialized buffers of size 0. The
  640. # buffers are resized here, and the values are copied in
  641. # the default state_dict loading code of the parent.
  642. if name == expected_min_name:
  643. self.min_val.resize_(val.shape)
  644. elif name == expected_max_name:
  645. self.max_val.resize_(val.shape)
  646. else:
  647. warnings.warn("Observer load_from_state_dict got unexpected name {}".format(name))
648. # For torchscript modules we need to update the attributes here since we do not
649. # call the `_load_from_state_dict` function defined in module.py
  650. if torch.jit.is_scripting():
  651. if name == expected_min_name:
  652. self.min_val.copy_(val)
  653. elif name == expected_max_name:
  654. self.max_val.copy_(val)
  655. else:
  656. warnings.warn("Observer load_from_state_dict got unexpected name {}".format(name))
  657. elif strict:
  658. missing_keys.append(key)
  659. if not torch.jit.is_scripting():
  660. super()._load_from_state_dict(
  661. state_dict,
  662. prefix,
  663. local_metadata,
  664. False,
  665. missing_keys,
  666. unexpected_keys,
  667. error_msgs,
  668. )
  669. def _load_from_state_dict_script(
  670. self,
  671. state_dict: Dict[str, Any],
  672. prefix: str,
  673. local_metadata: Dict[str, torch.Tensor],
  674. strict: bool,
  675. missing_keys: List[str],
  676. unexpected_keys: List[str],
  677. error_msgs: List[str],
  678. ):
  679. self._load_from_state_dict(
  680. state_dict,
  681. prefix,
  682. local_metadata,
  683. strict,
  684. missing_keys,
  685. unexpected_keys,
  686. error_msgs,
  687. )
  688. @torch.jit.export
  689. def reset_min_max_vals(self):
  690. """Resets the min/max values."""
691. # This used to be torch.ones, but that does not work because the
692. # JIT compiler can optimize it via common subexpression elimination,
693. # in which case both min_val and max_val point to the same tensor.
  694. self.min_val = torch.rand(0, )
  695. self.max_val = torch.rand(0, )
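# --- Illustrative sketch (not part of the upstream file) ---------------------
# For weights, per-channel observers keep one (min, max) pair per slice along
# ``ch_axis``, so ``calculate_qparams`` returns one scale/zero_point per
# channel. The shapes below assume a conv weight with 32 output channels.
def _example_per_channel_weights():
    weight = torch.randn(32, 16, 3, 3)
    obs = PerChannelMinMaxObserver(ch_axis=0, dtype=torch.qint8,
                                   qscheme=torch.per_channel_symmetric)
    obs(weight)
    scales, zero_points = obs.calculate_qparams()
    assert scales.shape == zero_points.shape == torch.Size([32])
    return scales, zero_points
# ------------------------------------------------------------------------------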
  696. class MovingAveragePerChannelMinMaxObserver(PerChannelMinMaxObserver):
  697. r"""Observer module for computing the quantization parameters based on the
698. moving average of the per channel min and max values.
  699. This observer uses the tensor min/max statistics to compute the per channel
700. quantization parameters. The module records the moving average of the minimum and maximum
  701. of incoming tensors, and uses this statistic to compute the quantization
  702. parameters.
  703. Args:
  704. averaging_constant: Averaging constant for min/max.
  705. ch_axis: Channel axis
  706. dtype: Quantized data type
  707. qscheme: Quantization scheme to be used
  708. reduce_range: Reduces the range of the quantized data type by 1 bit
  709. quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
  710. quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
711. eps: Epsilon value for float32. Defaults to `torch.finfo(torch.float32).eps`.
  712. The quantization parameters are computed the same way as in
  713. :class:`~torch.ao.quantization.observer.MovingAverageMinMaxObserver`, with the
  714. difference that the running min/max values are stored per channel.
  715. Scales and zero points are thus computed per channel as well.
716. .. note:: If the running minimum equals the running maximum, the scales
  717. and zero_points are set to 1.0 and 0.
  718. """
  719. def __init__(
  720. self,
  721. averaging_constant=0.01,
  722. ch_axis=0,
  723. dtype=torch.quint8,
  724. qscheme=torch.per_channel_affine,
  725. reduce_range=False,
  726. quant_min=None,
  727. quant_max=None,
  728. eps=torch.finfo(torch.float32).eps,
  729. **kwargs
  730. ) -> None:
  731. if not is_per_channel(qscheme):
  732. raise NotImplementedError(
733. "MovingAveragePerChannelMinMaxObserver's qscheme only supports \
  734. torch.per_channel_symmetric, torch.per_channel_affine and torch.per_channel_affine_float_qparams."
  735. )
  736. super().__init__(
  737. ch_axis=ch_axis,
  738. dtype=dtype,
  739. qscheme=qscheme,
  740. reduce_range=reduce_range,
  741. quant_min=quant_min,
  742. quant_max=quant_max,
  743. eps=eps,
  744. **kwargs
  745. )
  746. self.averaging_constant = averaging_constant
  747. def forward(self, x_orig):
  748. if x_orig.numel() == 0:
  749. return x_orig
  750. x = x_orig.detach() # avoid keeping autograd tape
  751. x = x.to(self.min_val.dtype)
  752. min_val = self.min_val
  753. max_val = self.max_val
  754. x_dim = x.size()
  755. new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
  756. new_axis_list[self.ch_axis] = 0
  757. new_axis_list[0] = self.ch_axis
  758. y = x.permute(new_axis_list)
  759. y = torch.flatten(y, start_dim=1)
  760. if min_val.numel() == 0 or max_val.numel() == 0:
  761. min_val, max_val = torch.aminmax(y, dim=1)
  762. else:
  763. min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
  764. min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
  765. max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
  766. self.min_val.resize_(min_val.shape)
  767. self.max_val.resize_(max_val.shape)
  768. self.min_val.copy_(min_val)
  769. self.max_val.copy_(max_val)
  770. return x_orig
  771. class HistogramObserver(UniformQuantizationObserverBase):
  772. r"""
  773. The module records the running histogram of tensor values along with
  774. min/max values. ``calculate_qparams`` will calculate scale and zero_point.
  775. Args:
  776. bins: Number of bins to use for the histogram
777. upsample_rate: Factor by which the histograms are upsampled; this is
  778. used to interpolate histograms with varying ranges across observations
  779. dtype: dtype argument to the `quantize` node needed to implement the
  780. reference model spec
  781. qscheme: Quantization scheme to be used
  782. reduce_range: Reduces the range of the quantized data type by 1 bit
783. eps: Epsilon value for float32. Defaults to `torch.finfo(torch.float32).eps`.
  784. The scale and zero point are computed as follows:
  785. 1. Create the histogram of the incoming inputs.
  786. The histogram is computed continuously, and the ranges per bin change
  787. with every new tensor observed.
  788. 2. Search the distribution in the histogram for optimal min/max values.
  789. The search for the min/max values ensures the minimization of the
  790. quantization error with respect to the floating point model.
  791. 3. Compute the scale and zero point the same way as in the
  792. :class:`~torch.ao.quantization.MinMaxObserver`
  793. """
  794. histogram: torch.Tensor
  795. min_val: torch.Tensor
  796. max_val: torch.Tensor
  797. def __init__(
  798. self,
  799. bins: int = 2048,
  800. upsample_rate: int = 128,
  801. dtype: torch.dtype = torch.quint8,
  802. qscheme=torch.per_tensor_affine,
  803. reduce_range=False,
  804. quant_min=None,
  805. quant_max=None,
  806. factory_kwargs=None,
  807. eps=torch.finfo(torch.float32).eps,
  808. ) -> None:
  809. if not is_per_tensor(qscheme):
  810. raise NotImplementedError(
811. "HistogramObserver's qscheme only supports torch.per_tensor_symmetric \
  812. and torch.per_tensor_affine."
  813. )
  814. # bins: The number of bins used for histogram calculation.
  815. super().__init__(
  816. dtype=dtype,
  817. qscheme=qscheme,
  818. reduce_range=reduce_range,
  819. quant_min=quant_min,
  820. quant_max=quant_max,
  821. factory_kwargs=factory_kwargs,
  822. eps=eps,
  823. )
  824. factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
  825. self.bins = bins
  826. self.register_buffer("histogram", torch.zeros(self.bins, **factory_kwargs))
  827. self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
  828. self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
  829. self.dst_nbins = 2 ** torch.iinfo(self.dtype).bits
  830. self.upsample_rate = upsample_rate
  831. def _get_norm(
  832. self, delta_begin: torch.Tensor, delta_end: torch.Tensor, density: torch.Tensor
  833. ) -> torch.Tensor:
  834. r"""
835. Compute the norm of the values uniformly distributed between
  836. delta_begin and delta_end.
  837. Currently only L2 norm is supported.
  838. norm = density * (integral_{begin, end} x^2)
  839. = density * (end^3 - begin^3) / 3
  840. """
  841. norm = (
  842. delta_end * delta_end * delta_end - delta_begin * delta_begin * delta_begin
  843. ) / 3
  844. return density * norm
  845. def _compute_quantization_error(self, next_start_bin: int, next_end_bin: int):
  846. r"""
  847. Compute the quantization error if we use start_bin to end_bin as the
  848. min and max to do the quantization.
  849. """
  850. bin_width = (self.max_val.item() - self.min_val.item()) / self.bins
  851. dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
  852. if dst_bin_width == 0.0:
  853. return 0.0
  854. src_bin = torch.arange(self.bins, device=self.histogram.device)
  855. # distances from the beginning of first dst_bin to the beginning and
  856. # end of src_bin
  857. src_bin_begin = (src_bin - next_start_bin) * bin_width
  858. src_bin_end = src_bin_begin + bin_width
  859. # which dst_bins the beginning and end of src_bin belong to?
  860. dst_bin_of_begin = torch.clamp(
  861. torch.div(src_bin_begin, dst_bin_width, rounding_mode='floor'), 0, self.dst_nbins - 1
  862. )
  863. dst_bin_of_begin_center = (dst_bin_of_begin + 0.5) * dst_bin_width
  864. dst_bin_of_end = torch.clamp(
  865. torch.div(src_bin_end, dst_bin_width, rounding_mode='floor'), 0, self.dst_nbins - 1
  866. )
  867. dst_bin_of_end_center = (dst_bin_of_end + 0.5) * dst_bin_width
  868. density = self.histogram / bin_width
  869. norm = torch.zeros(self.bins, device=self.histogram.device)
  870. delta_begin = src_bin_begin - dst_bin_of_begin_center
  871. delta_end = dst_bin_width / 2
  872. norm += self._get_norm(delta_begin,
  873. torch.ones(self.bins, device=self.histogram.device) * delta_end,
  874. density)
  875. norm += (dst_bin_of_end - dst_bin_of_begin - 1) * self._get_norm(
  876. torch.tensor(-dst_bin_width / 2), torch.tensor(dst_bin_width / 2), density
  877. )
  878. dst_bin_of_end_center = dst_bin_of_end * dst_bin_width + dst_bin_width / 2
  879. delta_begin = -dst_bin_width / 2
  880. delta_end = src_bin_end - dst_bin_of_end_center
  881. norm += self._get_norm(torch.tensor(delta_begin), delta_end, density)
  882. return norm.sum().item()
  883. def _non_linear_param_search(self) -> Tuple[torch.Tensor, torch.Tensor]:
  884. r"""Non-linear parameter search.
  885. An approximation for L2 error minimization for selecting min/max.
886. By selecting new min/max, we filter out outliers in the input distribution.
  887. This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
  888. caffe2/quantization/server/norm_minimization.cc
  889. """
  890. assert self.histogram.size()[0] == self.bins, "bins mismatch"
  891. bin_width = (self.max_val - self.min_val) / self.bins
  892. # cumulative sum
  893. total = torch.sum(self.histogram).item()
  894. cSum = torch.cumsum(self.histogram, dim=0)
  895. stepsize = 1e-5 # granularity
  896. alpha = 0.0 # lower bound
  897. beta = 1.0 # upper bound
  898. start_bin = 0
  899. end_bin = self.bins - 1
  900. norm_min = float("inf")
  901. while alpha < beta:
  902. # Find the next step
  903. next_alpha = alpha + stepsize
  904. next_beta = beta - stepsize
  905. # find the left and right bins between the quantile bounds
  906. l = start_bin
  907. r = end_bin
  908. while l < end_bin and cSum[l] < next_alpha * total:
  909. l = l + 1
  910. while r > start_bin and cSum[r] > next_beta * total:
  911. r = r - 1
  912. # decide the next move
  913. next_start_bin = start_bin
  914. next_end_bin = end_bin
  915. if (l - start_bin) > (end_bin - r):
  916. # move the start bin
  917. next_start_bin = l
  918. alpha = next_alpha
  919. else:
  920. # move the end bin
  921. next_end_bin = r
  922. beta = next_beta
  923. if next_start_bin == start_bin and next_end_bin == end_bin:
  924. continue
  925. # calculate the quantization error using next_start_bin and next_end_bin
  926. norm = self._compute_quantization_error(next_start_bin, next_end_bin)
  927. if norm > norm_min:
  928. break
  929. norm_min = norm
  930. start_bin = next_start_bin
  931. end_bin = next_end_bin
  932. new_min = self.min_val + bin_width * start_bin
  933. new_max = self.min_val + bin_width * (end_bin + 1)
  934. return new_min, new_max
  935. def _adjust_min_max(
  936. self, combined_min: torch.Tensor, combined_max: torch.Tensor, upsample_rate: int
  937. ) -> Tuple[torch.Tensor, torch.Tensor, int, int]:
  938. # We ensure that:
  939. # (combined_max - combined_min)/(downsample_rate*Nbins) = (max - min)/(upsample_rate*Nbins)
  940. # This allows us to have a common grid of resolution s, where we can align
  941. # the input histogram
  942. # start_idx maps min_val to the histogram bin index.
943. # Computing the width of the histogram bins directly would be a straightforward solution, where
944. # hist_bin_width = (self.max_val - self.min_val) / (self.bins * upsample_rate)
945. # However, underflow happens if the numerator is close to the smallest positive subnormal number of FP32,
946. # so we avoid such a division operation.
  947. downsample_rate = int(
  948. torch.ceil(
  949. (combined_max - combined_min) * upsample_rate / (self.max_val - self.min_val)
  950. ).item()
  951. )
  952. e = downsample_rate * (self.max_val - self.min_val) / upsample_rate - (combined_max - combined_min)
  953. start_idx = int(
  954. torch.round((self.min_val - combined_min) * self.bins * upsample_rate / (self.max_val - self.min_val)).item()
  955. )
  956. combined_max = combined_max + e
  957. combined_min = combined_min
  958. return combined_min, combined_max, downsample_rate, start_idx
  959. def _combine_histograms(
  960. self,
  961. orig_hist: torch.Tensor,
  962. new_hist: torch.Tensor,
  963. upsample_rate: int,
  964. downsample_rate: int,
  965. start_idx: int,
  966. Nbins: int,
  967. ) -> torch.Tensor:
968. # First up-sample the histogram with new data by a factor of L (the upsample_rate)
969. # This creates an approximate probability density that is piecewise constant
  970. upsampled_histogram = new_hist.repeat_interleave(upsample_rate)
  971. # Now insert the upsampled histogram into the output
  972. # histogram, which is initialized with zeros.
  973. # The offset at which the histogram is introduced is determined
  974. # by the start index as the output histogram can cover a wider range
  975. histogram_with_output_range = torch.zeros(
  976. (Nbins * downsample_rate), device=orig_hist.device
  977. )
  978. histogram_with_output_range[
  979. start_idx : Nbins * upsample_rate + start_idx
  980. ] = upsampled_histogram
  981. # Compute integral histogram, double precision is needed to ensure
  982. # that there are no overflows
  983. integral_histogram = torch.cumsum(
  984. histogram_with_output_range, 0, dtype=torch.double
  985. )[downsample_rate - 1 :: downsample_rate]
  986. # Finally perform interpolation
  987. shifted_integral_histogram = torch.zeros((Nbins), device=orig_hist.device)
  988. shifted_integral_histogram[1:Nbins] = integral_histogram[0:-1]
  989. interpolated_histogram = (
  990. integral_histogram - shifted_integral_histogram
  991. ) / upsample_rate
  992. orig_hist = orig_hist + interpolated_histogram.to(torch.float)
  993. return orig_hist
  994. def forward(self, x_orig: torch.Tensor) -> torch.Tensor:
  995. if x_orig.numel() == 0:
  996. return x_orig
  997. x = x_orig.detach()
  998. min_val = self.min_val
  999. max_val = self.max_val
  1000. same_values = min_val.item() == max_val.item()
  1001. is_uninitialized = min_val == float("inf") and max_val == float("-inf")
  1002. if is_uninitialized or same_values:
  1003. min_val, max_val = torch.aminmax(x)
  1004. self.min_val.resize_(min_val.shape)
  1005. self.min_val.copy_(min_val)
  1006. self.max_val.resize_(max_val.shape)
  1007. self.max_val.copy_(max_val)
  1008. assert (
  1009. min_val.numel() == 1 and max_val.numel() == 1
  1010. ), "histogram min/max values must be scalar."
  1011. torch.histc(
  1012. x, self.bins, min=min_val, max=max_val, out=self.histogram # type: ignore[arg-type]
  1013. )
  1014. else:
  1015. new_min, new_max = torch.aminmax(x)
  1016. combined_min = torch.min(new_min, min_val)
  1017. combined_max = torch.max(new_max, max_val)
  1018. # combine the existing histogram and new histogram into 1 histogram
  1019. # We do this by first upsampling the histogram to a dense grid
  1020. # and then downsampling the histogram efficiently
  1021. (
  1022. combined_min,
  1023. combined_max,
  1024. downsample_rate,
  1025. start_idx,
  1026. ) = self._adjust_min_max(combined_min, combined_max, self.upsample_rate)
  1027. assert (
  1028. combined_min.numel() == 1 and combined_max.numel() == 1
  1029. ), "histogram min/max values must be scalar."
  1030. combined_histogram = torch.histc(
  1031. x, self.bins, min=combined_min, max=combined_max # type: ignore[arg-type]
  1032. )
  1033. if combined_min == min_val and combined_max == max_val:
  1034. combined_histogram += self.histogram
  1035. else:
  1036. combined_histogram = self._combine_histograms(
  1037. combined_histogram,
  1038. self.histogram,
  1039. self.upsample_rate,
  1040. downsample_rate,
  1041. start_idx,
  1042. self.bins,
  1043. )
  1044. self.histogram.detach_().resize_(combined_histogram.shape)
  1045. self.histogram.copy_(combined_histogram)
  1046. self.min_val.detach_().resize_(combined_min.shape)
  1047. self.min_val.copy_(combined_min)
  1048. self.max_val.detach_().resize_(combined_max.shape)
  1049. self.max_val.copy_(combined_max)
  1050. return x_orig
  1051. @torch.jit.export
  1052. def calculate_qparams(self):
  1053. is_uninitialized = self.min_val == float("inf") and self.max_val == float(
  1054. "-inf"
  1055. )
  1056. if is_uninitialized:
  1057. warnings.warn(
  1058. "must run observer before calling calculate_qparams.\
  1059. Returning default scale and zero point "
  1060. )
  1061. return torch.tensor([1.0], device=self.min_val.device.type), torch.tensor([0], device=self.min_val.device.type)
  1062. assert self.bins == len(self.histogram), (
1063. "The number of bins in the histogram should be equal to the number of bins "
  1064. "supplied while making this observer"
  1065. )
  1066. new_min, new_max = self._non_linear_param_search()
  1067. return self._calculate_qparams(new_min, new_max)
  1068. def _save_to_state_dict(self, destination, prefix, keep_vars):
  1069. super()._save_to_state_dict(destination, prefix, keep_vars)
  1070. destination[prefix + "min_val"] = self.min_val
  1071. destination[prefix + "max_val"] = self.max_val
  1072. def _load_from_state_dict(
  1073. self,
  1074. state_dict,
  1075. prefix,
  1076. local_metadata,
  1077. strict,
  1078. missing_keys,
  1079. unexpected_keys,
  1080. error_msgs,
  1081. ):
  1082. version = local_metadata.get("version", None)
  1083. if version is None or version < 3:
  1084. # if min_val and max_val are not initialized, update their shape
  1085. # to account for the differences between v2 and v3
  1086. min_val_name, max_val_name = prefix + "min_val", prefix + "max_val"
  1087. if min_val_name in state_dict:
  1088. if state_dict[min_val_name].shape == torch.Size([0]):
  1089. state_dict[min_val_name] = torch.tensor(float("inf"))
  1090. if max_val_name in state_dict:
  1091. if state_dict[max_val_name].shape == torch.Size([0]):
  1092. state_dict[max_val_name] = torch.tensor(float("-inf"))
  1093. local_state = ["min_val", "max_val"]
  1094. for name in local_state:
  1095. key = prefix + name
  1096. if key in state_dict:
  1097. val = state_dict[key]
  1098. setattr(self, name, val)
  1099. elif strict:
  1100. missing_keys.append(key)
  1101. super()._load_from_state_dict(
  1102. state_dict,
  1103. prefix,
  1104. local_metadata,
  1105. strict,
  1106. missing_keys,
  1107. unexpected_keys,
  1108. error_msgs,
  1109. )
  1110. def extra_repr(self):
  1111. return "min_val={}, max_val={}".format(self.min_val, self.max_val)
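# --- Illustrative sketch (not part of the upstream file) ---------------------
# HistogramObserver accumulates a histogram across calibration batches and then
# searches for the clipping range that minimizes the L2 quantization error, so
# it is more robust to outliers than a plain min/max observer.
def _example_histogram_calibration():
    obs = HistogramObserver(bins=2048, dtype=torch.quint8,
                            qscheme=torch.per_tensor_affine)
    for _ in range(8):                     # stand-in for a calibration loop
        obs(torch.randn(1024))
    scale, zero_point = obs.calculate_qparams()   # runs the non-linear search
    return scale, zero_point
# ------------------------------------------------------------------------------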
  1112. class FixedQParamsObserver(ObserverBase):
  1113. r"""
  1114. Observer that simulates quantize and dequantize with fixed
  1115. quantization parameters in training time. Only per tensor
  1116. quantization is supported.
  1117. Args:
  1118. `scale` (float): fixed scale for the observer
  1119. `zero_point` (int): fixed zero point for the observer
  1120. `dtype`, `qscheme`, `quant_min`, `quant_max`
  1121. """
  1122. scale: torch.Tensor
  1123. zero_point: torch.Tensor
  1124. def __init__(self,
  1125. scale,
  1126. zero_point,
  1127. dtype=torch.quint8,
  1128. qscheme=torch.per_tensor_affine,
  1129. quant_min=0,
  1130. quant_max=255):
  1131. super().__init__(dtype=dtype)
  1132. self.quant_min = quant_min
  1133. self.quant_max = quant_max
  1134. self.register_buffer('scale', torch.tensor([scale], dtype=torch.float))
  1135. self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.int))
  1136. self.dtype = dtype
  1137. self.qscheme = qscheme
  1138. def forward(self, X):
  1139. return X
  1140. @torch.jit.export
  1141. def calculate_qparams(self):
  1142. return self.scale, self.zero_point
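# --- Illustrative sketch (not part of the upstream file) ---------------------
# FixedQParamsObserver never looks at the data; it simply reports the scale and
# zero_point it was constructed with. This suits ops whose output range is
# known ahead of time, e.g. an output in [0, 1) mapped to quint8 with
# scale 1/256 and zero_point 0 (the values used here are an assumption for the
# sketch, not taken from this file).
def _example_fixed_qparams():
    obs = FixedQParamsObserver(scale=1.0 / 256.0, zero_point=0,
                               dtype=torch.quint8, quant_min=0, quant_max=255)
    obs(torch.rand(4))                     # forward is a pure passthrough
    return obs.calculate_qparams()         # (tensor([0.0039]), tensor([0]))
# ------------------------------------------------------------------------------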
  1143. class PlaceholderObserver(ObserverBase):
  1144. r"""
  1145. Observer that doesn't do anything and just passes its configuration to the
  1146. quantized module's ``.from_float()``.
  1147. Can be used for quantization to float16 which doesn't require determining
  1148. ranges.
  1149. Args:
  1150. dtype: dtype argument to the `quantize` node needed to implement the
  1151. reference model spec.
  1152. quant_min: minimum value in quantized domain (TODO: align behavior with other observers)
1153. quant_max: maximum value in quantized domain
  1154. custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation
  1155. (Can be used in Graph Mode Passes for special case ops).
  1156. compute_dtype (deprecated): if set, marks the future quantize function to use
  1157. dynamic quantization instead of static quantization.
  1158. This field is deprecated, use `is_dynamic=True` instead.
  1159. is_dynamic: if True, the `quantize` function in the reference model
  1160. representation taking stats from this observer instance will
  1161. use dynamic quantization.
  1162. """
  1163. def __init__(
  1164. self, dtype=torch.float32, custom_op_name="", compute_dtype=None,
  1165. quant_min=None, quant_max=None, is_dynamic=False,
  1166. ) -> None:
  1167. super().__init__(dtype=dtype)
  1168. # dtype of input of the target operator, e.g. for dynamic quantization
  1169. # ops, the dtype will be float32
  1170. self.dtype = dtype
  1171. self.quant_min = quant_min
  1172. self.quant_max = quant_max
  1173. self.custom_op = custom_op_name
  1174. # used for configuration of computation type for dynamic quantization
  1175. if compute_dtype:
  1176. is_dynamic = True
  1177. warnings.warn(
  1178. "Please use `is_dynamic` instead of `compute_dtype`. \
  1179. `compute_dtype` will be deprecated in a future release \
  1180. of PyTorch."
  1181. )
  1182. self.is_dynamic = is_dynamic
  1183. def forward(self, x):
  1184. return x
  1185. @torch.jit.export
  1186. def calculate_qparams(self):
  1187. raise Exception(
  1188. "calculate_qparams should not be called for PlaceholderObserver"
  1189. )
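
# Illustrative sketch (comment only, not executed here). PlaceholderObserver
# carries configuration without observing anything: `forward` returns its
# input unchanged and `calculate_qparams` is intentionally unsupported.
# Passing the deprecated `compute_dtype` argument flips `is_dynamic` to True
# and emits a deprecation warning.
#
#     static_fp16 = PlaceholderObserver(dtype=torch.float16)
#     _ = static_fp16(torch.randn(4))            # identity; nothing recorded
#     dynamic_obs = PlaceholderObserver(dtype=torch.quint8, is_dynamic=True)
#     # dynamic_obs.calculate_qparams() would raise, by design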


class RecordingObserver(ObserverBase):
    r"""
    The module is mainly for debugging and records the tensor values during runtime.

    Args:
        dtype: Quantized data type
        qscheme: Quantization scheme to be used
        reduce_range: Reduces the range of the quantized data type by 1 bit
    """

    __annotations__ = {"tensor_val": List[Optional[torch.Tensor]]}

    def __init__(self, dtype=torch.quint8, **kwargs):
        super().__init__(dtype=dtype, **kwargs)  # type: ignore[call-arg]
        self.tensor_val = []

    def forward(self, x):
        self.tensor_val.append(x.clone())
        return x

    @torch.jit.export
    def calculate_qparams(self):
        raise Exception("calculate_qparams should not be called for RecordingObserver")

    @torch.jit.export
    def get_tensor_value(self):
        return self.tensor_val
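
# Debugging sketch (comment only). RecordingObserver clones every tensor it
# sees into `self.tensor_val`, so memory grows with each forward call; it is
# meant for short debugging runs rather than full calibration.
#
#     rec = RecordingObserver()
#     for _ in range(3):
#         rec(torch.randn(2, 2))
#     recorded = rec.get_tensor_value()          # list of 3 cloned tensors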


class NoopObserver(ObserverBase):
    r"""
    Observer that doesn't do anything and just passes its configuration to the
    quantized module's ``.from_float()``.

    Primarily used for quantization to float16 which doesn't require determining
    ranges.

    Args:
        dtype: Quantized data type
        custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation
                        (Can be used in Graph Mode Passes for special case ops).
    """

    def __init__(self, dtype=torch.float16, custom_op_name="") -> None:
        super().__init__(dtype=dtype)
        self.dtype = dtype
        self.custom_op = custom_op_name

    def forward(self, x):
        return x

    @torch.jit.export
    def calculate_qparams(self):
        raise Exception("calculate_qparams should not be called for NoopObserver")


class ReuseInputObserver(ObserverBase):
    r"""This observer is used when we want to reuse the observer from the operator
    that produces the input Tensor, typically used for operators like reshape, e.g.

    ```
    x0 = ...
    x1 = x0.reshape()
    ```

    If we configure x0 to be observed by some observer, let's say MinMaxObserver,
    and reshape is configured with ReuseInputObserver, we'll reuse the observer
    instance of x0 for x1 (the output of reshape). If x0 is not observed, we also
    won't observe x1.

    Note: this is only enabled in FX Graph Mode Quantization.
    """

    def __init__(self):
        super().__init__(torch.quint8)

    def forward(self, x):
        return x

    @torch.jit.export
    def calculate_qparams(self):
        raise Exception("calculate_qparams should not be called for ReuseInputObserver")


def _is_observer_script_module(mod, obs_type_name):
    """Returns true if the given mod is an instance of an Observer script module."""
    if isinstance(mod, torch.jit.RecursiveScriptModule):
        # qualified name looks like '__torch__.torch.ao.quantization.observer.___torch_mangle_2.MinMaxObserver'
        suffix = mod._c.qualified_name.split(".", 1)[1]
        name = re.sub(r"\.___torch_mangle_\d+", "", suffix)
        return obs_type_name in name
    return False


def _is_activation_post_process(module):
    return (
        isinstance(module, (torch.ao.quantization.ObserverBase,
                            torch.ao.quantization.FakeQuantizeBase))
        or _is_observer_script_module(module, "quantization.observer")
    )
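
# Sketch of how these predicates are typically used (comment only): they
# answer "is this submodule an observer or fake-quant attached for
# quantization?", covering both eager-mode modules and scripted observers.
#
#     obs = MinMaxObserver()
#     assert _is_activation_post_process(obs)
#     assert not _is_activation_post_process(torch.nn.Linear(2, 2))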


def _is_per_channel_script_obs_instance(module):
    if isinstance(module, torch.jit.RecursiveScriptModule):
        return _is_observer_script_module(
            module, "quantization.observer.PerChannelMinMaxObserver"
        ) or _is_observer_script_module(
            module, "quantization.observer.MovingAveragePerChannelMinMaxObserver"
        )
    return False


def get_observer_state_dict(mod):
    r"""
    Returns the state dict corresponding to the observer stats.
    Traverses the model state_dict and extracts the observer stats.
    """
    od = OrderedDict()
    if isinstance(mod, torch.jit.RecursiveScriptModule):
        for k, v in mod.state_dict().items():
            if "observer" in k:
                od[k] = v
    else:
        # path for GraphModule and nn.Module (eager mode)
        for k, v in mod.state_dict().items():
            if "activation_post_process" in k:
                od[k] = v
    od._metadata = mod.state_dict()._metadata  # type: ignore[attr-defined]
    return od
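
# Illustrative sketch (comment only; `calibrated_model` and the file name are
# placeholder names, not part of this module). After calibration, observer
# stats can be extracted and persisted separately from the model weights:
#
#     obs_dict = get_observer_state_dict(calibrated_model)
#     torch.save(obs_dict, "observer_stats.pt")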


def load_observer_state_dict(mod, obs_dict):
    r"""
    Given an input model and a state_dict containing model observer stats,
    load the stats back into the model. The observer state_dict can be saved
    using torch.ao.quantization.get_observer_state_dict.
    """
    missing_keys: List[str] = []
    unexpected_keys: List[str] = []
    for name, module in mod.named_modules():
        prefix = name + "."
        if _is_activation_post_process(module):
            if _is_per_channel_script_obs_instance(module):
                # For per-channel observers we need to call a custom load_from_state_dict
                # to resize the tensor. However, this is not called when the module is
                # scripted and we end up calling the default one in module.py.
                module._load_from_state_dict_script(
                    obs_dict, prefix, {}, True, missing_keys, unexpected_keys, []
                )
            else:
                module._load_from_state_dict(
                    obs_dict, prefix, {}, False, missing_keys, unexpected_keys, []
                )
    for k in missing_keys:
        if "observer" in k or "activation_post_process" in k:
            raise Exception("Missing keys for observer {} in state_dict".format(k))
    for k in unexpected_keys:
        if "observer" in k or "activation_post_process" in k:
            raise Exception("Unexpected keys for observer {} in state_dict".format(k))
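
# Illustrative round trip (comment only; the model names are placeholders):
# calibrate one prepared copy of a model, export just its observer stats,
# then load those stats into another prepared copy of the same model.
#
#     obs_dict = get_observer_state_dict(calibrated_model)
#     load_observer_state_dict(fresh_prepared_model, obs_dict)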


# Restrict activations to be in the range (0,127)
default_observer = MinMaxObserver.with_args(quant_min=0, quant_max=127)
"""
Default observer for static quantization, usually used for debugging.
"""

default_placeholder_observer = PlaceholderObserver
"""
Default placeholder observer, usually used for quantization to torch.float16.
"""

default_debug_observer = RecordingObserver
"""
Default debug-only observer.
"""

default_weight_observer = MinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_tensor_symmetric
)
"""
Default weight observer.
"""

weight_observer_range_neg_127_to_127 = MinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_tensor_symmetric,
    quant_min=-127, quant_max=127, eps=2 ** -12)
"""
Symmetric weight observer with the 8-bit values restricted to [-127, +127], excluding -128.
"""

default_histogram_observer = HistogramObserver.with_args(quant_min=0, quant_max=127)
"""
Default histogram observer, usually used for PTQ.
"""

default_per_channel_weight_observer = PerChannelMinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_channel_symmetric
)
"""
Default per-channel weight observer, usually used on backends where per-channel
weight quantization is supported, such as `fbgemm`.
"""

per_channel_weight_observer_range_neg_127_to_127 = PerChannelMinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_channel_symmetric,
    quant_min=-127, quant_max=127, eps=2 ** -12)
"""
Per-channel, symmetric weight observer with the 8-bit values restricted to [-127, +127], excluding -128.
"""

default_dynamic_quant_observer = PlaceholderObserver.with_args(
    dtype=torch.quint8, quant_min=0, quant_max=255, is_dynamic=True,
)
"""
Default observer for dynamic quantization.
"""

default_float_qparams_observer = PerChannelMinMaxObserver.with_args(
    dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0
)
"""
Default observer for a floating point zero-point.
"""

default_float_qparams_observer_4bit = PerChannelMinMaxObserver.with_args(
    dtype=torch.quint4x2, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0
)
"""
Default observer for a floating point zero-point and 4 bit activations.
"""

# TODO(future PR): remove these defaults and enforce activation functions
# to explicitly specify their output range
default_fixed_qparams_range_neg1to1_observer = FixedQParamsObserver.with_args(
    scale=2.0 / 256.0, zero_point=128, dtype=torch.quint8, quant_min=0, quant_max=255)
default_fixed_qparams_range_0to1_observer = FixedQParamsObserver.with_args(
    scale=1.0 / 256.0, zero_point=0, dtype=torch.quint8, quant_min=0, quant_max=255)
# TODO: the following 2 variables are kept for backwards compatibility; remove after a few releases
default_symmetric_fixed_qparams_observer = default_fixed_qparams_range_neg1to1_observer
default_affine_fixed_qparams_observer = default_fixed_qparams_range_0to1_observer
"""
Default observers for fixed qparams operations.
"""

default_reuse_input_observer = ReuseInputObserver
"""
Default observer for operators like reshape that reuse the observer of the input to
the operator.
"""