lazy.py

import itertools
import warnings
from typing import Protocol

import torch
from ..parameter import is_lazy

__all__ = ['LazyModuleMixin']


class _LazyProtocol(Protocol):
    """This class is used to avoid errors with mypy checks for
    the attributes defined in a mixin:
    https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes
    """
    def _register_load_state_dict_pre_hook(self, hook):
        ...

    def register_forward_pre_hook(self, hook):
        ...

    def _lazy_load_hook(
            self, state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs):
        ...

    def _get_name(self):
        ...

    def _infer_parameters(self, module, input):
        ...

    @property
    def _parameters(self):
        ...

    @property
    def _buffers(self):
        ...

    @property
    def _non_persistent_buffers_set(self):
        ...

    @property
    def _load_hook(self):
        ...

    @property
    def _initialize_hook(self):
        ...


class LazyModuleMixin:
    r"""A mixin for modules that lazily initialize parameters, also known as "lazy modules."

    .. warning::
        Lazy modules are an experimental new feature under active development,
        and their API is likely to change.

    Modules that lazily initialize parameters, or "lazy modules",
    derive the shapes of their parameters from the first input(s)
    to their forward method. Until that first forward they contain
    :class:`torch.nn.UninitializedParameter` s that should not be accessed
    or used, and afterward they contain regular :class:`torch.nn.Parameter` s.
    Lazy modules are convenient since they don't require computing some
    module arguments, like the :attr:`in_features` argument of a
    typical :class:`torch.nn.Linear`.

    After construction, networks with lazy modules should first
    be converted to the desired dtype and placed on the expected device.
    This is because lazy modules only perform shape inference, so the usual dtype
    and device placement behavior applies.
    The lazy modules should then perform "dry runs" to initialize all the components in the module.
    These "dry runs" send inputs of the correct size, dtype, and device through
    the network and to each one of its lazy modules. After this the network can be used as usual.

    >>> # xdoctest: +SKIP
    >>> class LazyMLP(torch.nn.Module):
    ...     def __init__(self):
    ...         super().__init__()
    ...         self.fc1 = torch.nn.LazyLinear(10)
    ...         self.relu1 = torch.nn.ReLU()
    ...         self.fc2 = torch.nn.LazyLinear(1)
    ...         self.relu2 = torch.nn.ReLU()
    ...
    ...     def forward(self, input):
    ...         x = self.relu1(self.fc1(input))
    ...         y = self.relu2(self.fc2(x))
    ...         return y
    >>> # constructs a network with lazy modules
    >>> lazy_mlp = LazyMLP()
    >>> # transforms the network's device and dtype
    >>> # NOTE: these transforms can and should be applied after construction and before any 'dry runs'
    >>> lazy_mlp = lazy_mlp.cuda().double()
    >>> lazy_mlp
    LazyMLP( (fc1): LazyLinear(in_features=0, out_features=10, bias=True)
      (relu1): ReLU()
      (fc2): LazyLinear(in_features=0, out_features=1, bias=True)
      (relu2): ReLU()
    )
    >>> # performs a dry run to initialize the network's lazy modules
    >>> lazy_mlp(torch.ones(10, 10).cuda())
    >>> # after initialization, LazyLinear modules become regular Linear modules
    >>> lazy_mlp
    LazyMLP(
      (fc1): Linear(in_features=10, out_features=10, bias=True)
      (relu1): ReLU()
      (fc2): Linear(in_features=10, out_features=1, bias=True)
      (relu2): ReLU()
    )
    >>> # attaches an optimizer, since parameters can now be used as usual
    >>> optim = torch.optim.SGD(lazy_mlp.parameters(), lr=0.01)

    A final caveat when using lazy modules is that the order of initialization of a network's
    parameters may change, since the lazy modules are always initialized after other modules.
    For example, if the LazyMLP class defined above had a :class:`torch.nn.LazyLinear` module
    first and then a regular :class:`torch.nn.Linear` second, the second module would be
    initialized on construction and the first module would be initialized during the first dry run.
    This can cause the parameters of a network using lazy modules to be initialized differently
    than the parameters of a network without lazy modules, as the order of parameter initializations,
    which often depends on a stateful random number generator, is different.
    Check :doc:`/notes/randomness` for more details.
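
    As an illustrative sketch (output omitted, since the values depend on the global RNG state),
    seeding the generator right before construction and the dry run makes the lazily initialized
    parameters reproducible across runs, even though they may still differ from those of an
    equivalent network built without lazy modules:

    >>> # xdoctest: +SKIP
    >>> torch.manual_seed(0)
    >>> seeded_mlp = LazyMLP()
    >>> seeded_mlp(torch.ones(10, 10))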

    Lazy modules can be serialized with a state dict like other modules. For example:

    >>> lazy_mlp = LazyMLP()
    >>> # The state dict shows the uninitialized parameters
    >>> lazy_mlp.state_dict()
    OrderedDict([('fc1.weight', Uninitialized parameter),
                 ('fc1.bias',
                  tensor([-1.8832e+25,  4.5636e-41, -1.8832e+25,  4.5636e-41, -6.1598e-30,
                           4.5637e-41, -1.8788e+22,  4.5636e-41, -2.0042e-31,  4.5637e-41])),
                 ('fc2.weight', Uninitialized parameter),
                 ('fc2.bias', tensor([0.0019]))])

    Lazy modules can load regular :class:`torch.nn.Parameter` s (i.e. you can serialize/deserialize
    initialized LazyModules and they will remain initialized):

    >>> full_mlp = LazyMLP()
    >>> # Dry run to initialize another module
    >>> full_mlp.forward(torch.ones(10, 1))
    >>> # Load an initialized state into a lazy module
    >>> lazy_mlp.load_state_dict(full_mlp.state_dict())
    >>> # The state dict now holds valid values
    >>> lazy_mlp.state_dict()
    OrderedDict([('fc1.weight',
                  tensor([[-0.3837],
                          [ 0.0907],
                          [ 0.6708],
                          [-0.5223],
                          [-0.9028],
                          [ 0.2851],
                          [-0.4537],
                          [ 0.6813],
                          [ 0.5766],
                          [-0.8678]])),
                 ('fc1.bias',
                  tensor([-1.8832e+25,  4.5636e-41, -1.8832e+25,  4.5636e-41, -6.1598e-30,
                           4.5637e-41, -1.8788e+22,  4.5636e-41, -2.0042e-31,  4.5637e-41])),
                 ('fc2.weight',
                  tensor([[ 0.1320,  0.2938,  0.0679,  0.2793,  0.1088, -0.1795, -0.2301,  0.2807,
                            0.2479,  0.1091]])),
                 ('fc2.bias', tensor([0.0019]))])

    Note, however, that the loaded parameters will not be replaced when doing a "dry run" if they are
    initialized when the state is loaded. This prevents using initialized modules in different contexts.
    """

    # modules inheriting from this will change their __class__ to the specified
    # one after they are fully initialized
    cls_to_become = None

    def __init__(self: _LazyProtocol, *args, **kwargs):
        # Mypy doesn't like this super call in a mixin
        super().__init__(*args, **kwargs)  # type: ignore[misc]
        self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook)
        self._initialize_hook = self.register_forward_pre_hook(self._infer_parameters)
        warnings.warn('Lazy modules are a new feature under heavy development '
                      'so changes to the API or functionality can happen at any moment.')

    def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars):
        # This should ideally be implemented as a hook, but that would require
        # overriding `detach` in UninitializedParameter to return itself, which
        # is not clean.
        for name, param in self._parameters.items():
            if param is not None:
                if not (is_lazy(param) or keep_vars):
                    param = param.detach()
                destination[prefix + name] = param
        for name, buf in self._buffers.items():
            if buf is not None and name not in self._non_persistent_buffers_set:
                if not (is_lazy(buf) or keep_vars):
                    buf = buf.detach()
                destination[prefix + name] = buf

    def _lazy_load_hook(
            self: _LazyProtocol, state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs):
        """load_state_dict pre-hook function for lazy buffers and parameters.

        The purpose of this hook is to adjust the current state and/or
        ``state_dict`` being loaded so that a module instance serialized in
        both un/initialized state can be deserialized onto both un/initialized
        module instances.

        See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``
        for the details of the hook specification.
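
        As an illustrative example, loading an initialized state dict into a fresh
        lazy module materializes its parameters before the values are copied:

        >>> # xdoctest: +SKIP
        >>> fc = torch.nn.LazyLinear(10)
        >>> fc.load_state_dict(torch.nn.Linear(3, 10).state_dict())
        <All keys matched successfully>
        >>> fc.has_uninitialized_params()
        False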
        """
        for name, param in itertools.chain(self._parameters.items(), self._buffers.items()):
            key = prefix + name
            if key in state_dict and param is not None:
                input_param = state_dict[key]
                if is_lazy(param):
                    # The current parameter is not initialized but the one being loaded is,
                    # so create a new parameter based on the uninitialized one
                    if not is_lazy(input_param):
                        with torch.no_grad():
                            param.materialize(input_param.shape)

    def initialize_parameters(self: _LazyProtocol, *args, **kwargs):
        r"""Initialize parameters according to the input batch properties.

        This adds an interface to isolate parameter initialization from the
        forward pass when doing parameter shape inference.
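
        For example, a subclass typically materializes its uninitialized parameters
        here once the input shapes are known. The ``MyLazyLinear`` class below is an
        illustrative sketch, not part of this module:

        >>> # xdoctest: +SKIP
        >>> class MyLazyLinear(LazyModuleMixin, torch.nn.Module):
        ...     def __init__(self, out_features):
        ...         super().__init__()
        ...         self.out_features = out_features
        ...         # placeholders whose shapes are unknown until the first forward
        ...         self.weight = torch.nn.UninitializedParameter()
        ...         self.bias = torch.nn.UninitializedParameter()
        ...
        ...     def initialize_parameters(self, input):
        ...         if self.has_uninitialized_params():
        ...             with torch.no_grad():
        ...                 self.weight.materialize((self.out_features, input.shape[-1]))
        ...                 self.bias.materialize((self.out_features,))
        ...                 torch.nn.init.kaiming_uniform_(self.weight)
        ...                 torch.nn.init.zeros_(self.bias)
        ...
        ...     def forward(self, input):
        ...         return torch.nn.functional.linear(input, self.weight, self.bias)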
        """
        raise NotImplementedError(
            'initialize_parameters is not implemented for {}'.format(self.__class__.__name__))

    def has_uninitialized_params(self: _LazyProtocol):
        r"""Check if a module has parameters that are not initialized.
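
        For example (an illustrative sketch; the lazy module reports ``True``
        until its first forward pass materializes the parameters):

        >>> # xdoctest: +SKIP
        >>> fc = torch.nn.LazyLinear(10)
        >>> fc.has_uninitialized_params()
        True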
        """
        # This is to avoid having the JIT track this parameter and forcing
        # custom modules' __setstate__ to add it
        params = self._parameters.values()
        buffers = self._buffers.values()
        for param in itertools.chain(params, buffers):
            if is_lazy(param):
                return True
        return False

    def _infer_parameters(self: _LazyProtocol, module, input):
        r"""Infers the size and initializes the parameters according to the
        provided input batch.

        Given a module that contains parameters that were declared uninitialized
        using :class:`torch.nn.UninitializedParameter`, runs a forward pass
        on the complete module using the provided input to initialize all the
        parameters as needed.
        The module is set into evaluation mode before running the forward pass in order
        to avoid saving statistics or calculating gradients.
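
        As an illustrative example of the resulting behavior, a built-in lazy module
        such as :class:`torch.nn.LazyLinear` becomes its regular counterpart after
        its first forward pass:

        >>> # xdoctest: +SKIP
        >>> fc = torch.nn.LazyLinear(4)
        >>> type(fc).__name__
        'LazyLinear'
        >>> _ = fc(torch.randn(2, 3))
        >>> type(fc).__name__
        'Linear'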
        """
        module.initialize_parameters(*input)
        if module.has_uninitialized_params():
            raise RuntimeError('module {} has not been fully initialized'.format(self._get_name()))
        module._initialize_hook.remove()
        module._load_hook.remove()
        delattr(module, '_initialize_hook')
        delattr(module, '_load_hook')
        if module.cls_to_become is not None:
            module.__class__ = module.cls_to_become

    def _replicate_for_data_parallel(self: _LazyProtocol):
        raise RuntimeError('Modules with uninitialized parameters can\'t be used with `DataParallel`. '
                           'Run a dummy forward pass to correctly initialize the modules')