import enum
import operator

import torch
import torch.nn as nn
import torch.ao.nn.intrinsic.quantized as nniq
import torch.ao.nn.quantized as nnq

toq = torch.ops.quantized

from typing import Tuple, Callable, Dict, Set, List, Optional, Union

from torch.fx import GraphModule
from torch.fx.graph import Node
from torch.ao.quantization import (
    ObserverBase,
    FakeQuantizeBase,
)
from torch.ao.quantization.utils import getattr_from_fqn
from torch.ao.quantization.observer import _is_activation_post_process

from .ns_types import NSNodeTargetType, NSResultsType


# TODO(future PR): consider deleting this enum and using the torch types
# directly. This might be tricky because it is not a one to one mapping.
class NodeInputOrOutputType(enum.Enum):
    FP32 = enum.auto()  # torch.float
    INT8 = enum.auto()  # torch.qint8 or torch.quint8
    FP16 = enum.auto()  # torch.float16
    UNKNOWN = enum.auto()  # we cannot determine input/output dtype
    # TODO(future PR): while these functions can support multiple dtypes,
    # for the purposes of numerical debugging we want to get the actual
    # dtype used in the model. We will likely need some kind of dtype
    # propagation to estimate this.
    FP32_OR_INT8 = enum.auto()  # either torch.float or torch.quint8 or torch.qint8
    # TODO(future PRs): dynamic quant, fake quant, etc


def get_node_first_input_and_output_type(
    node: Node,
    gm: GraphModule,
    logger_cls: Callable,
    node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
) -> Tuple[NodeInputOrOutputType, NodeInputOrOutputType]:

    # TODO(future PR): clean this up
    FUNS_IO_TYPE_FP32 = node_type_to_io_type_map["funs_io_type_fp32"]
    FUNS_IO_TYPE_FP16 = node_type_to_io_type_map["funs_io_type_fp16"]
    FUNS_IO_TYPE_INT8 = node_type_to_io_type_map["funs_io_type_int8"]
    FUNS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["funs_io_type_fp32_or_int8"]
    MODS_IO_TYPE_FP32 = node_type_to_io_type_map["mods_io_type_fp32"]
    MODS_IO_TYPE_INT8 = node_type_to_io_type_map["mods_io_type_int8"]
    MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"]
    METHS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["meths_io_type_fp32_or_int8"]

    if node.op == "call_function":
        if node.target in FUNS_IO_TYPE_FP32:
            return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32)
        elif node.target in FUNS_IO_TYPE_FP16:
            return (NodeInputOrOutputType.FP16, NodeInputOrOutputType.FP16)
        elif node.target in FUNS_IO_TYPE_INT8:
            return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8)
        elif node.target in FUNS_IO_TYPE_FP32_OR_INT8:
            first_arg = get_normalized_nth_input(node, gm, 0)
            assert isinstance(first_arg, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                first_arg, gm, logger_cls, node_type_to_io_type_map
            )
            return (prev_node_output_type, prev_node_output_type)
        else:
            return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)

    elif node.op == "call_module":
        assert isinstance(node.target, str)
        mod = getattr_from_fqn(gm, node.target)
        is_known_fp32_or_int8_input_module = any(
            isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8  # type: ignore[arg-type]
        )
        if (
            isinstance(mod, (logger_cls, ObserverBase, FakeQuantizeBase))  # type: ignore[arg-type]
            or is_known_fp32_or_int8_input_module
        ):
            # A logger or observer's input and output type is the output
            # type of the preceding node.
            first_arg = get_normalized_nth_input(node, gm, 0)
            assert isinstance(first_arg, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                first_arg, gm, logger_cls, node_type_to_io_type_map
            )
            return (prev_node_output_type, prev_node_output_type)
        is_known_fp32_input_module = any(
            isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32  # type: ignore[arg-type]
        )
        is_known_int8_input_module = any(
            isinstance(mod, target_type) for target_type in MODS_IO_TYPE_INT8  # type: ignore[arg-type]
        )
        if is_known_fp32_input_module:
            return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32)
        elif is_known_int8_input_module:
            return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8)
        else:
            return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)

    elif node.op == "call_method":
        if node.target == "dequantize":
            # Dequantize is a special node because it allows multiple input types.
            # So, we look up the output type of the previous node and return that
            # as the input type of this node instance.
            prev_node = get_normalized_nth_input(node, gm, 0)
            assert isinstance(prev_node, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                prev_node, gm, logger_cls, node_type_to_io_type_map
            )
            return (prev_node_output_type, NodeInputOrOutputType.FP32)

        elif node.target == "to":
            # `to` is a special node because it allows multiple input types.
            # So, we look up the output type of the previous node and return that
            # as the input type of this node instance. We also look up the target
            # of `to` and return the correct output type.
            prev_node = get_normalized_nth_input(node, gm, 0)
            assert isinstance(prev_node, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                prev_node, gm, logger_cls, node_type_to_io_type_map
            )

            cur_node_dtype_target = get_normalized_nth_input(node, gm, 1)
            assert (
                cur_node_dtype_target is torch.float16
            ), f"{cur_node_dtype_target} handling needs to be added"

            return (prev_node_output_type, NodeInputOrOutputType.FP16)

        elif node.target in METHS_IO_TYPE_FP32_OR_INT8:
            first_arg = get_normalized_nth_input(node, gm, 0)
            assert isinstance(first_arg, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                first_arg, gm, logger_cls, node_type_to_io_type_map
            )
            return (prev_node_output_type, prev_node_output_type)

        return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)

    else:
        return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
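
# Example (an illustrative sketch, not part of the original file; assumes the
# mappings returned by torch.ao.ns.fx.mappings.get_node_type_to_io_type_map):
#
#   >>> from torch.ao.ns.fx.mappings import get_node_type_to_io_type_map
#   >>> io_map = get_node_type_to_io_type_map()
#   >>> # For a call_function node targeting F.linear (in funs_io_type_fp32),
#   >>> # this returns (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32);
#   >>> # for toq.linear it returns (INT8, INT8); for an `x.dequantize()`
#   >>> # call_method node, the input type is inherited from the producer
#   >>> # node and the output type is FP32.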


def get_node_input_qparams(
    node: Node,
    gm: GraphModule,
    node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
) -> Optional[Tuple[Union[torch.Tensor, float], Union[torch.Tensor, int]]]:
    """
    Returns the qparams (scale, zero_point) of the first input to `node`,
    if they can be inferred from the graph.
    """
    prev_node = get_normalized_nth_input(node, gm, 0)

    if not isinstance(prev_node, Node):
        return None

    MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"]

    def _get_scale_zp_from_function_args(node, gm, scale_arg_idx, zp_arg_idx):
        scale_node = get_normalized_nth_input(node, gm, scale_arg_idx)
        zp_node = get_normalized_nth_input(node, gm, zp_arg_idx)
        assert isinstance(scale_node, Node) and isinstance(scale_node.target, str)
        assert isinstance(zp_node, Node) and isinstance(zp_node.target, str)
        scale_obj = getattr_from_fqn(gm, scale_node.target)
        zp_obj = getattr_from_fqn(gm, zp_node.target)
        return (scale_obj, zp_obj)

    if prev_node.op == "call_function":

        # quantize - read the args directly
        if prev_node.target == torch.quantize_per_tensor:
            return _get_scale_zp_from_function_args(prev_node, gm, 1, 2)
        elif prev_node.target in (toq.add, toq.add_relu, toq.mul, toq.mul_relu):
            return _get_scale_zp_from_function_args(prev_node, gm, 2, 3)

        # TODO(future PR): handle more functionals
        # TODO(future PR): handle functional ops which inherit qparams from input
        return None

    elif prev_node.op == "call_module":

        # get type of the module
        assert isinstance(prev_node.target, str)
        module_obj = getattr_from_fqn(gm, prev_node.target)
        if isinstance(
            module_obj,
            (
                nnq.Linear,
                nnq.Conv1d,
                nnq.Conv2d,
                nniq.ConvReLU2d,
                nnq.Conv3d,
                nnq.BatchNorm2d,
                nnq.BatchNorm3d,
                nnq.ConvTranspose1d,
                nnq.ConvTranspose2d,
                nnq.ELU,
                nnq.GroupNorm,
                nnq.InstanceNorm1d,
                nnq.InstanceNorm2d,
                nnq.InstanceNorm3d,
                nnq.LayerNorm,
                nnq.Hardswish,
                nnq.LeakyReLU,
                nnq.ReLU6,
                nniq.BNReLU2d,
                nniq.BNReLU3d,
                nniq.ConvReLU1d,
                nniq.ConvReLU2d,
                nniq.ConvReLU3d,
                nniq.LinearReLU,
            ),
        ):
            return (module_obj.scale, module_obj.zero_point)  # type: ignore[return-value]

        is_known_fp32_or_int8_input_module = any(
            isinstance(module_obj, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8  # type: ignore[arg-type]
        )
        if is_known_fp32_or_int8_input_module:
            return get_node_input_qparams(prev_node, gm, node_type_to_io_type_map)

    return None
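
# Example (an illustrative sketch, not part of the original file): for a graph
# shaped like
#
#   x -> torch.quantize_per_tensor(x, scale_0, zero_point_0, torch.quint8) -> q_op
#
# calling get_node_input_qparams on the q_op node reads the scale and
# zero_point attributes referenced by the quantize node's args from the parent
# module and returns them as (scale, zero_point). `scale_0` and `zero_point_0`
# are hypothetical attribute names.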


def return_first_non_observer_node(
    node: Node,
    gm: GraphModule,
) -> Node:
    """
    If node is not an observer, returns it. If node is an observer,
    navigates up the graph and returns the first parent which is not an
    observer. For example,

    graph: (node_non_obs), node = node_non_obs : returns node_non_obs
    graph: (node_non_obs -> obs0), node = obs0 : returns node_non_obs
    graph: (node_non_obs -> obs0 -> fq0), node = fq0 : returns node_non_obs
    """
    if node.op == "call_module":
        node_obj = getattr_from_fqn(gm, node.target)  # type: ignore[arg-type]
        if _is_activation_post_process(node_obj):
            assert len(node.args) == 1
            assert isinstance(node.args[0], Node)
            node = node.args[0]
            # code duplication intended, not worth refactoring
            assert isinstance(node.target, str)
            node_obj = getattr_from_fqn(gm, node.target)
            if _is_activation_post_process(node_obj):
                assert len(node.args) == 1
                assert isinstance(node.args[0], Node)
                node = node.args[0]
    return node


def get_number_of_non_param_args(
    node: Node,
    gm: GraphModule,
) -> int:
    """
    Assumes that all non-param args occur first. Returns the number of
    non-param args expected for a node. For example, for

        F.linear(x, weight, bias)

    returns 1, because x is a non-param arg and weight and bias are params.
    For

        lstm_mod(x, hid)

    returns 2, because both x and hid are non-param args.
    """
    if node.op == "call_module":
        node_obj = getattr_from_fqn(gm, node.target)  # type: ignore[arg-type]
        if isinstance(node_obj, nn.LSTM):
            return 2
    # default is 1
    return 1


def get_arg_indices_of_inputs_to_log(node: Node) -> List[int]:
    """
    Returns the indices of args of the node which we should attach
    loggers to, if input logging is enabled.

    For example,
    * for (x + y), returns [0, 1]
    * for (1 + y), returns [1]
    * for (x + 1), returns [0]
    * for (linear(x, w, b)) returns [0]
    * by default, returns [0]
    """
    if len(node.args) == 0:
        return []
    if node.op == "call_function" and (
        # TODO(future PR): use relationship map instead of hardcoding
        node.target in (torch.add, torch.ops.quantized.add, operator.add)
        or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul)
    ):
        result = []
        for i in range(2):
            if isinstance(node.args[i], Node):
                result.append(i)
        return result
    return [0]


def get_target_type_str(node: Node, gm: GraphModule) -> str:
    """
    Returns a string representation of the type of the function or module
    pointed to by this node, or '' for other node types.
    """
    target_type = ""
    if node.op in ("call_function", "call_method"):
        target_type = torch.typename(node.target)
    elif node.op == "call_module":
        assert isinstance(node.target, str)
        target_mod = getattr_from_fqn(gm, node.target)
        target_type = torch.typename(target_mod)
    return target_type
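
# For example (illustrative, not part of the original file): a call_function
# node targeting torch.nn.functional.linear yields
# 'torch.nn.functional.linear', and a call_module node pointing at an
# nn.Conv2d instance yields 'torch.nn.modules.conv.Conv2d'.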


def rekey_logger_info_on_node_name_of_model(
    results: NSResultsType,
    model_name: str,
) -> NSResultsType:
    """
    Rekeys the layer name of a results dictionary to use node names
    from `model_name`.

    For example, transforms

        {'base_op_1_0': {'node_output': {'model_a':
          [{'ref_node_name': 'linear1', ...}]}}}

    into

        {'linear1': {'node_output': {'model_a':
          [{'ref_node_name': 'linear1', ...}]}}}

    Note: we cannot use these node names directly because they are not
    guaranteed to be consistent across models. This is why we extract
    the results first and rekey afterwards.
    """
    new_results = {}
    for old_layer_name, result_type_to_results in results.items():
        new_layer_name = None
        for _result_type, model_name_to_results in result_type_to_results.items():
            for cur_model_name, list_of_results in model_name_to_results.items():
                if cur_model_name == model_name:
                    assert len(list_of_results)
                    new_layer_name = list_of_results[0]["ref_node_name"]
        if new_layer_name is not None:
            new_results[new_layer_name] = result_type_to_results
        else:
            new_results[old_layer_name] = result_type_to_results
    return new_results


def maybe_add_missing_fqns(results: NSResultsType) -> None:
    """
    If `fqn` entries are filled in for one of the models in `results`, copies
    them over to any models which do not have them filled out.

    A common use case benefitting from this is comparing a model prepared by
    quantization to a quantized model. In this case, the model prepared by
    quantization would have `fqn` entries, and the quantized model would not.
    """
    # Check in the first result to find any model with fqn entries defined.
    model_name_with_fqns = None
    for layer_name, result_type_to_results in results.items():
        for result_type, model_name_to_results in result_type_to_results.items():
            for model_name, model_results in model_name_to_results.items():
                if len(model_results) > 0:
                    if model_results[0]["fqn"] is not None:
                        model_name_with_fqns = model_name
                        break
            break
        break

    if model_name_with_fqns:
        for layer_name, result_type_to_results in results.items():
            for result_type, model_name_to_results in result_type_to_results.items():
                ref_model_results = model_name_to_results[model_name_with_fqns]
                for model_name, model_results in model_name_to_results.items():
                    if model_name == model_name_with_fqns:
                        continue
                    for i in range(len(model_results)):
                        fqn = ref_model_results[i]["fqn"]
                        model_results[i]["fqn"] = fqn


def maybe_dequantize_first_two_tensor_args_and_handle_tuples(f):
    def inner(*args, **kwargs):
        a0, a1, *a_other = args

        if (isinstance(a0, tuple) and isinstance(a1, tuple)) or (
            isinstance(a0, list) and isinstance(a1, list)
        ):
            results = []
            for el0, el1 in zip(a0, a1):
                new_args = (el0, el1, *a_other)
                results.append(inner(*new_args, **kwargs))
            return results

        elif isinstance(a0, torch.Tensor) and isinstance(a1, torch.Tensor):
            if a0.is_quantized:
                a0 = a0.dequantize()
            if a1.is_quantized:
                a1 = a1.dequantize()

        # for the purposes of this util, only handle floats
        if a0.dtype != torch.float or a1.dtype != torch.float:
            return None

        new_args = (a0, a1, *a_other)
        return f(*new_args, **kwargs)

    return inner
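
# The decorator above lets the comparison functions below accept quantized
# tensors (dequantizing them first) and tuples/lists (comparing elementwise,
# e.g. LSTM outputs). An illustrative sketch, not part of the original file:
#
#   >>> xq = torch.quantize_per_tensor(torch.randn(4), 0.1, 0, torch.qint8)
#   >>> y = torch.randn(4)
#   >>> compute_sqnr(xq, y)             # xq is dequantized before comparing
#   >>> compute_sqnr((xq, xq), (y, y))  # returns a list, one result per pair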


@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Computes the SQNR between `x` and `y`.

    Args:
        x: Tensor or tuple of tensors
        y: Tensor or tuple of tensors

    Return:
        float or tuple of floats
    """
    Ps = torch.norm(x)
    Pn = torch.norm(x - y)
    return 20 * torch.log10(Ps / Pn)
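
# Worked example (illustrative, not part of the original file): SQNR is
# 20 * log10(||x|| / ||x - y||) dB, so an error norm 100x smaller than the
# signal norm gives 40 dB:
#
#   >>> x = torch.full((10,), 1.0)
#   >>> compute_sqnr(x, x + 0.01)  # ~tensor(40.), since 20 * log10(100) = 40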


@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_normalized_l2_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Computes the normalized L2 error between `x` and `y`.

    Args:
        x: Tensor or tuple of tensors
        y: Tensor or tuple of tensors

    Return:
        float or tuple of floats
    """
    return torch.sqrt(((x - y) ** 2).sum() / (x ** 2).sum())
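
# Worked example (illustrative, not part of the original file): this computes
# sqrt(sum((x - y)^2) / sum(x^2)), so a uniform error of 1% of the signal
# magnitude gives ~0.01:
#
#   >>> x = torch.full((10,), 1.0)
#   >>> compute_normalized_l2_error(x, x + 0.01)  # ~tensor(0.0100)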


@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_cosine_similarity(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Computes the cosine similarity between `x` and `y`.

    Args:
        x: Tensor or tuple of tensors
        y: Tensor or tuple of tensors

    Return:
        float or tuple of floats
    """
    # For convolutions, the shape of the quantized weight has one additional
    # dimension compared to the shape of the fp32 weight. Match the shapes
    # to enable cosine similarity comparison.
    x = x.reshape(1, -1)
    y = y.reshape(1, -1)
    return torch.nn.functional.cosine_similarity(x, y)
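
# Example (illustrative, not part of the original file): after flattening,
# parallel tensors score 1 and orthogonal tensors score 0:
#
#   >>> x = torch.tensor([1.0, 0.0])
#   >>> compute_cosine_similarity(x, 2 * x)                      # tensor([1.])
#   >>> compute_cosine_similarity(x, torch.tensor([0.0, 1.0]))   # tensor([0.])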


def op_type_supports_shadowing(node: Node) -> bool:
    if node.op == "call_function":
        if node.target in (torch.add, torch.mul, operator.add, operator.mul, torch.cat, torch.stack):
            # shadowing for ops with multiple tensor inputs is not implemented yet
            return False
    return True


def get_normalized_nth_input(node: Node, gm: GraphModule, idx: int) -> Node:
    """
    Given a node, gets the n'th input to that node, normalizing
    args and kwargs to the best of its ability.
    """
    try:
        norm_args_and_kwargs = node.normalized_arguments(
            gm, normalize_to_only_use_kwargs=True
        )
        if norm_args_and_kwargs is not None:
            norm_args, norm_kwargs = norm_args_and_kwargs
            assert len(norm_args) + len(norm_kwargs) > idx
            if idx < len(norm_args):
                return norm_args[idx]
            else:
                # note: in Python 3.7+ dicts are ordered
                return list(norm_kwargs.values())[idx]
        else:
            assert len(node.args) + len(node.kwargs) > idx
            if idx < len(node.args):
                return node.args[idx]  # type: ignore[return-value]
            else:
                # the index into kwargs is offset by the number of positional args
                kwargs_idx = idx - len(node.args)
                return list(node.kwargs.values())[kwargs_idx]  # type: ignore[return-value]
    except RuntimeError:
        # this RuntimeError happens when node argument normalization
        # requires typehints to proceed, such as for torch.add where
        # either the first, second or both arguments could be tensors
        assert len(node.args) + len(node.kwargs) > idx
        if idx < len(node.args):
            return node.args[idx]  # type: ignore[return-value]
        else:
            # the index into kwargs is offset by the number of positional args
            kwargs_idx = idx - len(node.args)
            return list(node.kwargs.values())[kwargs_idx]  # type: ignore[return-value]
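
# Example (an illustrative sketch, not part of the original file): for a node
# created from `F.linear(x, weight=w, bias=b)`, positional and keyword
# arguments are normalized into a single ordered sequence, so:
#
#   get_normalized_nth_input(node, gm, 0)  # -> the node for `x`
#   get_normalized_nth_input(node, gm, 1)  # -> the node for `w`
#   get_normalized_nth_input(node, gm, 2)  # -> the node for `b`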