_freeze.py

  1. """Freezing
  2. This is not intended to be imported directly; please use the exposed
  3. functionalities in `torch.jit`.
  4. """
  5. from typing import Optional, List
  6. import torch
  7. from torch.jit._script import RecursiveScriptModule, ScriptModule


def freeze(mod, preserved_attrs: Optional[List[str]] = None, optimize_numerics: bool = True):
    r"""
    Freezing a :class:`ScriptModule` will clone it and attempt to inline the cloned
    module's submodules, parameters, and attributes as constants in the TorchScript IR Graph.
    By default, `forward` will be preserved, as well as attributes & methods specified in
    `preserved_attrs`. Additionally, any attribute that is modified within a preserved
    method will be preserved.

    Freezing currently only accepts ScriptModules that are in eval mode.

    Freezing applies generic optimizations that will speed up your model regardless of machine.
    To further optimize using server-specific settings, run `optimize_for_inference` after
    freezing.
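
    A minimal sketch of that two-step flow (``model`` here is a stand-in for your
    own ``nn.Module``):

    .. code-block:: python

        frozen = torch.jit.freeze(torch.jit.script(model.eval()))
        opt = torch.jit.optimize_for_inference(frozen)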

    Args:
        mod (:class:`ScriptModule`): a module to be frozen
        preserved_attrs (Optional[List[str]]): a list of attributes to preserve in addition to the forward method.
            Attributes modified in preserved methods will also be preserved.
        optimize_numerics (bool): If ``True``, a set of optimization passes will be run that does not strictly
            preserve numerics. Full details of the optimizations can be found at `torch.jit.run_frozen_optimizations`.

    Returns:
        Frozen :class:`ScriptModule`.

    Example (Freezing a simple module with a Parameter):

    .. testcode::

        import torch

        class MyModule(torch.nn.Module):
            def __init__(self, N, M):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(N, M))
                self.linear = torch.nn.Linear(N, M)

            def forward(self, input):
                output = self.weight.mm(input)
                output = self.linear(output)
                return output

        scripted_module = torch.jit.script(MyModule(2, 3).eval())
        frozen_module = torch.jit.freeze(scripted_module)
        # parameters have been removed and inlined into the Graph as constants
        assert len(list(frozen_module.named_parameters())) == 0
        # See the compiled graph as Python code
        print(frozen_module.code)

    Example (Freezing a module with preserved attributes):

    .. testcode::

        import torch

        class MyModule2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.modified_tensor = torch.tensor(10.)
                self.version = 1

            def forward(self, input):
                self.modified_tensor += 1
                return input + self.modified_tensor

        scripted_module = torch.jit.script(MyModule2().eval())
        frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["version"])
        # we've manually preserved `version`, so it still exists on the frozen module and can be modified
        assert frozen_module.version == 1
        frozen_module.version = 2
        # `modified_tensor` is detected as being mutated in the forward, so freezing preserves
        # it to retain model semantics
        assert frozen_module(torch.tensor(1)) == torch.tensor(12)
        # now that we've run it once, the next result will be incremented by one
        assert frozen_module(torch.tensor(1)) == torch.tensor(13)

    Note:
        Freezing submodule attributes is also supported:

        .. code-block:: python

            frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["submodule.version"])
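
        For instance, a minimal self-contained sketch (the class and attribute
        names here are illustrative):

        .. code-block:: python

            class Sub(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.version = 1

                def forward(self, x):
                    return x + 1

            class Wrapper(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.submodule = Sub()

                def forward(self, x):
                    return self.submodule(x)

            scripted = torch.jit.script(Wrapper().eval())
            frozen = torch.jit.freeze(scripted, preserved_attrs=["submodule.version"])
            # the dotted path is expected to keep `version` reachable on the
            # preserved submodule of the frozen module
            assert frozen.submodule.version == 1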

    Note:
        If you're not sure why an attribute is not being inlined as a constant, you can run
        `dump_alias_db` on frozen_module.forward.graph to see if freezing has detected that the
        attribute is being modified.

    Note:
        Because freezing makes weights constants and removes module hierarchy, `to` and other
        nn.Module methods that manipulate device or dtype no longer work. As a workaround,
        you can remap devices by specifying `map_location` in `torch.jit.load`; however,
        device-specific logic may have been baked into the model.
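
        A minimal sketch of this workaround (the file name is illustrative):

        .. code-block:: python

            torch.jit.save(frozen_module, "frozen.pt")
            # storages are remapped to CPU at load time; any device-specific
            # logic already baked into the frozen graph is not rewritten
            loaded = torch.jit.load("frozen.pt", map_location="cpu")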
  78. """
  79. if not isinstance(mod, ScriptModule):
  80. raise RuntimeError(
  81. "Freezing expects a ScriptModule as input. "
  82. "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
  83. )
  84. if mod.training:
  85. raise RuntimeError(
  86. "Freezing is currently only implemented for modules in eval mode. "
  87. "Please call .eval() on your module before freezing."
  88. )
  89. preserved_attrs = preserved_attrs if preserved_attrs is not None else []
  90. out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
  91. RecursiveScriptModule._finalize_scriptmodule(out)
  92. preserved_methods = [x for x in preserved_attrs if mod._c._has_method(x)]
  93. run_frozen_optimizations(out, optimize_numerics, preserved_methods)
  94. return out


def run_frozen_optimizations(
    mod, optimize_numerics: bool = True, preserved_methods: Optional[List[str]] = None
):
    r"""
    Runs a series of optimizations looking for patterns that occur in frozen graphs.
    The current set of optimizations includes:

    - Dropout Removal
    - Pretranspose Linear Layers
    - Concat Linear Layers with same input Tensor
    - Conv -> Batchnorm folding
    - Conv -> Add/Sub folding
    - Conv -> Mul/Div folding

    Args:
        mod (:class:`ScriptModule`): a frozen module to be optimized
        optimize_numerics (bool): If ``True``, a set of optimization passes will be run that does not strictly
            preserve numerics. These optimizations preserve the default rtol and atol of `torch.testing.assert_close`
            when applied on a single transformation; however, in a module where many transformations are applied,
            the rtol or atol may no longer fall within the default `assert_close` tolerance. Conv -> Batchnorm folding,
            Conv -> Add/Sub folding, and Conv -> Mul/Div folding may all alter numerics.

    Returns:
        None

    Note:
        On rare occasions, this can result in slower execution.

    Example (Freezing a module with Conv->Batchnorm):

    .. code-block:: python

        import torch

        in_channels, out_channels = 3, 32
        conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
        bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
        mod = torch.nn.Sequential(conv, bn)
        # set optimize_numerics to False here so the numerics-altering conv -> batchnorm
        # folding is skipped; by default, freezing runs run_frozen_optimizations
        frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize_numerics=False)
        # inspect frozen mod
        assert "batch_norm" in str(frozen_mod.graph)
        torch.jit.run_frozen_optimizations(frozen_mod)
        assert "batch_norm" not in str(frozen_mod.graph)
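
        # As noted under `optimize_numerics`, a single folded transformation is
        # expected to stay within the default tolerances of `torch.testing.assert_close`;
        # a sanity-check sketch (the input shape is illustrative):
        inp = torch.rand(1, in_channels, 8, 8)
        torch.testing.assert_close(mod(inp), frozen_mod(inp))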
  131. """
  132. if mod._c._has_method("forward"):
  133. torch._C._jit_pass_optimize_frozen_graph(mod.graph, optimize_numerics)
  134. if preserved_methods is None:
  135. preserved_methods = []
  136. for method in preserved_methods:
  137. torch._C._jit_pass_optimize_frozen_graph(
  138. mod.__getattr__(method).graph, optimize_numerics
  139. )


def optimize_for_inference(mod: ScriptModule, other_methods: Optional[List[str]] = None) -> ScriptModule:
    """
    Performs a set of optimization passes to optimize a model for the
    purposes of inference. If the model is not already frozen, optimize_for_inference
    will invoke `torch.jit.freeze` automatically.

    In addition to generic optimizations that should speed up your model regardless
    of environment, optimize_for_inference will also bake in build-specific settings
    such as the presence of CUDNN or MKLDNN, and may in the future make transformations
    which speed things up on one machine but slow things down on another. Accordingly,
    serialization is not supported after invoking `optimize_for_inference` and
    is not guaranteed.

    This is still in prototype, and may have the potential to slow down your model.
    The primary use cases targeted so far have been vision models on CPU and, to a
    lesser extent, GPU.
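
    Args:
        mod (:class:`ScriptModule`): a module to be optimized for inference
        other_methods (Optional[List[str]]): methods besides ``forward`` to preserve
            and optimize; as the implementation below shows, these are forwarded to
            `torch.jit.freeze` as its `preserved_attrs`.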

    Example (optimizing a module with Conv->Batchnorm)::

        import torch

        in_channels, out_channels = 3, 32
        conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
        bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
        mod = torch.nn.Sequential(conv, bn)
        frozen_mod = torch.jit.optimize_for_inference(torch.jit.script(mod.eval()))
        assert "batch_norm" not in str(frozen_mod.graph)
        # if built with MKLDNN, convolution will be run with MKLDNN weights
        assert "MKLDNN" in str(frozen_mod.graph)
    """
    if not isinstance(mod, ScriptModule):
        raise RuntimeError(
            "optimize_for_inference expects a ScriptModule as input. "
            "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
        )

    if other_methods is None:
        other_methods = []

    # freezing removes attributes such as `training`, so its presence tells us
    # the module has not been frozen yet
    if hasattr(mod, "training"):
        mod = freeze(mod.eval(), preserved_attrs=other_methods)

    torch._C._jit_pass_optimize_for_inference(mod._c, other_methods)

    return mod