# weight_utils.py
  1. import torch
  2. import torch.nn as nn
  3. import torch.nn.functional as F
  4. import torch.ao.nn.quantized.dynamic as nnqd
  5. import torch.ao.nn.quantized as nnq
  6. import torch.ao.nn.intrinsic.qat as nniqat
  7. import torch.ao.nn.qat as nnqat
  8. import torch.ao.nn.intrinsic as nni
  9. import torch.ao.nn.intrinsic.quantized as nniq
  10. toq = torch.ops.quantized
  11. from torch.fx import GraphModule
  12. from torch.fx.graph import Node
  13. from .utils import (
  14. get_target_type_str,
  15. getattr_from_fqn,
  16. return_first_non_observer_node,
  17. )
  18. from .ns_types import (
  19. NSSingleResultValuesType,
  20. NSSingleResultType,
  21. )
  22. from typing import List, Optional, Dict, Callable
  23. def mod_weight_detach(mod: nn.Module) -> torch.Tensor:
  24. return mod.weight.detach() # type: ignore[operator]
  25. def mod_0_weight_detach(mod: nn.Module) -> torch.Tensor:
  26. return mod[0].weight.detach() # type: ignore[index]
  27. def mod_weight_bias_0(mod: nn.Module) -> torch.Tensor:
  28. return mod._weight_bias()[0] # type: ignore[operator]
  29. def get_lstm_weight(mod: nn.Module) -> List[torch.Tensor]:
  30. res = []
  31. for idx, param_name in enumerate(mod._flat_weights_names): # type: ignore[arg-type]
  32. if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
  33. param_value = mod._flat_weights[idx].detach() # type: ignore[index]
  34. res.append(param_value)
  35. return res
  36. def get_qlstm_weight(mod: nn.Module) -> List[torch.Tensor]:
  37. res = []
  38. for weight_value in mod._all_weight_values: # type: ignore[union-attr]
  39. res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
  40. res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
  41. return res
  42. def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor:
  43. if (
  44. isinstance(mod, (nn.Conv1d, nn.Conv2d, nn.Conv3d))
  45. ):
  46. return mod.weight.detach()
  47. elif (
  48. isinstance(mod, (nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d))
  49. ):
  50. return mod[0].weight.detach()
  51. else:
  52. return mod._weight_bias()[0] # type: ignore[operator]
  53. def get_linear_mod_weight(mod: nn.Module) -> torch.Tensor:
  54. if isinstance(mod, nn.Linear):
  55. return mod.weight.detach()
  56. elif isinstance(mod, nni.LinearReLU):
  57. return mod[0].weight.detach()
  58. else:
  59. return mod._weight_bias()[0] # type: ignore[operator]
  60. def get_lstm_mod_weights(mod: nn.Module) -> List[torch.Tensor]:
  61. # TODO(future PR): make more generic, handle everything
  62. if isinstance(mod, nn.LSTM):
  63. res = []
  64. for idx, param_name in enumerate(mod._flat_weights_names):
  65. if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
  66. param_value = mod._flat_weights[idx].detach()
  67. res.append(param_value)
  68. return res
  69. else:
  70. assert isinstance(mod, nnqd.LSTM), f"type {type(res)} not handled yet"
  71. res = []
  72. for weight_value in mod._all_weight_values:
  73. res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
  74. res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
  75. return res
  76. def get_conv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
  77. # traverse backwards from the weight arg, accounting for any observers
  78. weight_arg_node = node.args[1]
  79. assert isinstance(weight_arg_node, Node)
  80. weight_node = return_first_non_observer_node(weight_arg_node, gm)
  81. assert isinstance(weight_node, Node)
  82. assert weight_node.op == 'get_attr'
  83. weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
  84. return weight.detach()
  85. def get_qconv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
  86. # qconv state is arg 1
  87. qconv_state_node = node.args[1]
  88. assert isinstance(qconv_state_node, Node)
  89. assert qconv_state_node.op == 'get_attr'
  90. qconv_state_obj = getattr_from_fqn(gm, qconv_state_node.target) # type: ignore[arg-type]
  91. return qconv_state_obj.weight()
  92. def get_linear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
  93. # traverse backwards from the weight arg, accounting for any observers
  94. # supported patterns:
  95. # weight -> obs -> linear
  96. # weight -> to(torch.float16) -> dequantize -> linear
  97. linear_second_arg = node.args[1]
  98. assert isinstance(linear_second_arg, Node)
  99. if linear_second_arg.op == 'call_module':
  100. # weight -> obs -> linear
  101. weight_arg_node = node.args[1]
  102. assert isinstance(weight_arg_node, Node)
  103. weight_node = weight_arg_node.args[0]
  104. assert isinstance(weight_node, Node)
  105. assert weight_node.op == 'get_attr'
  106. weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
  107. return weight.detach()
  108. elif linear_second_arg.op == 'call_method':
  109. # weight -> to(torch.float16) -> dequantize -> linear
  110. assert linear_second_arg.op == 'call_method'
  111. dequant_node = node.args[1]
  112. assert isinstance(dequant_node, Node)
  113. to_fp16_node = dequant_node.args[0]
  114. assert isinstance(to_fp16_node, Node)
  115. # extract the dtype, so we can cast to it before returning
  116. target_dtype = to_fp16_node.args[1]
  117. weight_node = to_fp16_node.args[0]
  118. assert isinstance(weight_node, Node)
  119. assert weight_node.op == 'get_attr'
  120. weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
  121. # return the weight with fp16 cast
  122. return weight.detach().to(target_dtype)
  123. else:
  124. assert linear_second_arg.op == 'get_attr'
  125. weight = getattr_from_fqn(gm, linear_second_arg.target) # type: ignore[arg-type]
  126. return weight.detach()
  127. def get_qlinear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
  128. # packed weight is arg 1
  129. packed_weight_node = node.args[1]
  130. assert isinstance(packed_weight_node, Node)
  131. assert packed_weight_node.op == 'get_attr'
  132. packed_weight = getattr_from_fqn(gm, packed_weight_node.target) # type: ignore[arg-type]
  133. # TODO(future PR): why does packed_weight.unpack() not work?
  134. (weight, _bias), _name = packed_weight.__getstate__()
  135. return weight
def get_op_to_type_to_weight_extraction_fn() -> Dict[str, Dict[Callable, Callable]]:
    """Return the default mapping from node op kind ('call_module' or
    'call_function') and target type to the function that extracts the
    weight from a node with that target.
    """
    op_to_type_to_weight_extraction_fn: Dict[str, Dict[Callable, Callable]] = {
        'call_module': {
            # Conv1d
            nn.Conv1d: mod_weight_detach,
            nni.ConvReLU1d: mod_0_weight_detach,
            nnq.Conv1d: mod_weight_bias_0,
            nnqat.Conv1d: mod_weight_detach,
            nniqat.ConvBn1d: mod_weight_detach,
            nniqat.ConvBnReLU1d: mod_weight_detach,
            nniqat.ConvReLU1d: mod_weight_detach,
            nniq.ConvReLU1d: mod_weight_bias_0,
            # Conv2d
            nn.Conv2d: mod_weight_detach,
            nni.ConvReLU2d: mod_0_weight_detach,
            nnq.Conv2d: mod_weight_bias_0,
            nnqat.Conv2d: mod_weight_detach,
            nniqat.ConvBn2d: mod_weight_detach,
            nniqat.ConvBnReLU2d: mod_weight_detach,
            nniqat.ConvReLU2d: mod_weight_detach,
            nniq.ConvReLU2d: mod_weight_bias_0,
            # Conv3d
            nn.Conv3d: mod_weight_detach,
            nni.ConvReLU3d: mod_0_weight_detach,
            nnq.Conv3d: mod_weight_bias_0,
            nnqat.Conv3d: mod_weight_detach,
            nniqat.ConvBn3d: mod_weight_detach,
            nniqat.ConvBnReLU3d: mod_weight_detach,
            nniqat.ConvReLU3d: mod_weight_detach,
            nniq.ConvReLU3d: mod_weight_bias_0,
            # Linear
            nn.Linear: mod_weight_detach,
            nnq.Linear: mod_weight_bias_0,
            nni.LinearReLU: mod_0_weight_detach,
            nniq.LinearReLU: mod_weight_bias_0,
            nnqat.Linear: mod_weight_detach,
            nnqd.Linear: mod_weight_bias_0,
            nniqat.LinearReLU: mod_weight_detach,
            nniqat.LinearBn1d: mod_weight_detach,
            nn.modules.linear.NonDynamicallyQuantizableLinear: mod_weight_detach,
            # LSTM
            nn.LSTM: get_lstm_weight,
            nnqd.LSTM: get_qlstm_weight,
        },
        'call_function': {
            # Conv
            F.conv1d: get_conv_fun_weight,
            F.conv2d: get_conv_fun_weight,
            F.conv3d: get_conv_fun_weight,
            toq.conv1d: get_qconv_fun_weight,
            toq.conv2d: get_qconv_fun_weight,
            toq.conv3d: get_qconv_fun_weight,
            toq.conv1d_relu: get_qconv_fun_weight,
            toq.conv2d_relu: get_qconv_fun_weight,
            toq.conv3d_relu: get_qconv_fun_weight,
            # Linear
            F.linear: get_linear_fun_weight,
            toq.linear: get_qlinear_fun_weight,
            toq.linear_relu: get_qlinear_fun_weight,
        },
    }
    return op_to_type_to_weight_extraction_fn
  198. def extract_weight_from_node(
  199. node: Node,
  200. gm: GraphModule,
  201. op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
  202. ) -> Optional[NSSingleResultType]:
  203. res_type = NSSingleResultValuesType.WEIGHT.value
  204. # Not all graphmodules have _node_name_to_scope, so only fill it
  205. # out if it exists.
  206. fqn = None
  207. if hasattr(gm, '_node_name_to_scope'):
  208. fqn = gm._node_name_to_scope[node.name][0] # type: ignore[index]
  209. if op_to_type_to_weight_extraction_fn is None:
  210. op_to_type_to_weight_extraction_fn = get_op_to_type_to_weight_extraction_fn()
  211. ref_node_type = get_target_type_str(node, gm)
  212. # for extracting weights, these are always the same
  213. prev_node_type = ref_node_type
  214. if node.op == 'call_function':
  215. function_mapping = op_to_type_to_weight_extraction_fn['call_function']
  216. for target_fn_type, weight_extraction_fn in function_mapping.items():
  217. if node.target == target_fn_type:
  218. weight = weight_extraction_fn(node, gm)
  219. return {
  220. 'type': res_type,
  221. 'values': [weight],
  222. 'prev_node_name': node.name,
  223. 'prev_node_target_type': prev_node_type,
  224. 'ref_node_name': node.name,
  225. 'ref_node_target_type': ref_node_type,
  226. 'index_within_arg': 0,
  227. 'index_of_arg': 0,
  228. 'fqn': fqn,
  229. }
  230. elif node.op == 'call_module':
  231. # for call_module, we need to look up the modules to do the type check
  232. assert isinstance(node.target, str)
  233. mod = getattr_from_fqn(gm, node.target)
  234. module_mapping = op_to_type_to_weight_extraction_fn['call_module']
  235. for target_mod_type, weight_extraction_fn in module_mapping.items():
  236. if type(mod) == target_mod_type:
  237. weight = weight_extraction_fn(mod)
  238. return {
  239. 'type': res_type,
  240. 'values': [weight],
  241. 'prev_node_name': node.name,
  242. 'prev_node_target_type': prev_node_type,
  243. 'ref_node_name': node.name,
  244. 'ref_node_target_type': ref_node_type,
  245. 'index_within_arg': 0,
  246. 'index_of_arg': 0,
  247. 'fqn': fqn,
  248. }
  249. return None