import numbers
from typing import Optional, Tuple
import warnings

import torch
from torch import Tensor

"""
We recreate the RNN modules here because we need them decomposed into their
building blocks, so that observers can be attached to every intermediate tensor.
"""

__all__ = [
    "LSTMCell",
    "LSTM"
]

class LSTMCell(torch.nn.Module):
    r"""A quantizable long short-term memory (LSTM) cell.

    For the description and the argument types, please refer to :class:`~torch.nn.LSTMCell`

    Examples::

        >>> import torch.ao.nn.quantizable as nnqa
        >>> rnn = nnqa.LSTMCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> cx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
        ...     hx, cx = rnn(input[i], (hx, cx))
        ...     output.append(hx)
    """
    _FLOAT_MODULE = torch.nn.LSTMCell

    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.input_size = input_dim
        self.hidden_size = hidden_dim
        self.bias = bias

        self.igates = torch.nn.Linear(input_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
        self.hgates = torch.nn.Linear(hidden_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
        self.gates = torch.ao.nn.quantized.FloatFunctional()

        self.input_gate = torch.nn.Sigmoid()
        self.forget_gate = torch.nn.Sigmoid()
        self.cell_gate = torch.nn.Tanh()
        self.output_gate = torch.nn.Sigmoid()

        self.fgate_cx = torch.ao.nn.quantized.FloatFunctional()
        self.igate_cgate = torch.ao.nn.quantized.FloatFunctional()
        self.fgate_cx_igate_cgate = torch.ao.nn.quantized.FloatFunctional()

        self.ogate_cy = torch.ao.nn.quantized.FloatFunctional()

        self.initial_hidden_state_qparams: Tuple[float, int] = (1.0, 0)
        self.initial_cell_state_qparams: Tuple[float, int] = (1.0, 0)
        self.hidden_state_dtype: torch.dtype = torch.quint8
        self.cell_state_dtype: torch.dtype = torch.quint8
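
    # The decomposed ``forward`` below spells out the standard LSTM cell update,
    # routing every add/mul through a ``FloatFunctional`` so observers can watch
    # each intermediate tensor:
    #
    #     i = sigmoid(W_ii x + b_ii + W_hi h + b_hi)
    #     f = sigmoid(W_if x + b_if + W_hf h + b_hf)
    #     g = tanh(W_ig x + b_ig + W_hg h + b_hg)
    #     o = sigmoid(W_io x + b_io + W_ho h + b_ho)
    #     c' = f * c + i * g
    #     h' = o * tanh(c')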
    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
        if hidden is None or hidden[0] is None or hidden[1] is None:
            hidden = self.initialize_hidden(x.shape[0], x.is_quantized)
        hx, cx = hidden

        igates = self.igates(x)
        hgates = self.hgates(hx)
        gates = self.gates.add(igates, hgates)

        input_gate, forget_gate, cell_gate, out_gate = gates.chunk(4, 1)

        input_gate = self.input_gate(input_gate)
        forget_gate = self.forget_gate(forget_gate)
        cell_gate = self.cell_gate(cell_gate)
        out_gate = self.output_gate(out_gate)

        fgate_cx = self.fgate_cx.mul(forget_gate, cx)
        igate_cgate = self.igate_cgate.mul(input_gate, cell_gate)
        fgate_cx_igate_cgate = self.fgate_cx_igate_cgate.add(fgate_cx, igate_cgate)
        cy = fgate_cx_igate_cgate

        tanh_cy = torch.tanh(cy)
        hy = self.ogate_cy.mul(out_gate, tanh_cy)
        return hy, cy

    def initialize_hidden(self, batch_size: int, is_quantized: bool = False) -> Tuple[Tensor, Tensor]:
        h, c = torch.zeros((batch_size, self.hidden_size)), torch.zeros((batch_size, self.hidden_size))
        if is_quantized:
            (h_scale, h_zp) = self.initial_hidden_state_qparams
            (c_scale, c_zp) = self.initial_cell_state_qparams
            h = torch.quantize_per_tensor(h, scale=h_scale, zero_point=h_zp, dtype=self.hidden_state_dtype)
            c = torch.quantize_per_tensor(c, scale=c_scale, zero_point=c_zp, dtype=self.cell_state_dtype)
        return h, c

    def _get_name(self):
        return 'QuantizableLSTMCell'

    @classmethod
    def from_params(cls, wi, wh, bi=None, bh=None):
        """Uses the weights and biases to create a new LSTM cell.

        Args:
            wi, wh: Weights for the input and hidden layers
            bi, bh: Biases for the input and hidden layers
        """
        assert (bi is None) == (bh is None)  # Either both None or both have values
        input_size = wi.shape[1]
        hidden_size = wh.shape[1]
        cell = cls(input_dim=input_size, hidden_dim=hidden_size,
                   bias=(bi is not None))
        cell.igates.weight = torch.nn.Parameter(wi)
        if bi is not None:
            cell.igates.bias = torch.nn.Parameter(bi)
        cell.hgates.weight = torch.nn.Parameter(wh)
        if bh is not None:
            cell.hgates.bias = torch.nn.Parameter(bh)
        return cell

    @classmethod
    def from_float(cls, other):
        assert type(other) == cls._FLOAT_MODULE
        assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'"
        observed = cls.from_params(other.weight_ih, other.weight_hh,
                                   other.bias_ih, other.bias_hh)
        observed.qconfig = other.qconfig
        observed.igates.qconfig = other.qconfig
        observed.hgates.qconfig = other.qconfig
        return observed
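
# Minimal usage sketch for the cell above (illustrative only, not part of this
# module's API surface; ``default_qconfig`` is just one possible eager-mode qconfig):
#
#     float_cell = torch.nn.LSTMCell(10, 20)
#     float_cell.qconfig = torch.ao.quantization.default_qconfig
#     observed_cell = LSTMCell.from_float(float_cell)
#     x = torch.randn(3, 10)
#     hy, cy = observed_cell(x)  # hidden defaults to zeros via initialize_hidden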

class _LSTMSingleLayer(torch.nn.Module):
    r"""A single one-directional LSTM layer.

    The difference between a layer and a cell is that the layer can process a
    sequence, while the cell only expects an instantaneous value.
    """
    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.cell = LSTMCell(input_dim, hidden_dim, bias=bias, **factory_kwargs)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        result = []
        for xx in x:
            hidden = self.cell(xx, hidden)
            result.append(hidden[0])  # type: ignore[index]
        result_tensor = torch.stack(result, 0)
        return result_tensor, hidden

    @classmethod
    def from_params(cls, *args, **kwargs):
        cell = LSTMCell.from_params(*args, **kwargs)
        layer = cls(cell.input_size, cell.hidden_size, cell.bias)
        layer.cell = cell
        return layer
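
# Illustrative only: the single-direction layer consumes a whole sequence laid
# out as ``(seq_len, batch, input_size)`` and stacks the per-step hidden states:
#
#     layer = _LSTMSingleLayer(10, 20)
#     seq = torch.randn(5, 3, 10)
#     out, (h, c) = layer(seq)  # out: (5, 3, 20); h, c: (3, 20)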

class _LSTMLayer(torch.nn.Module):
    r"""A single bi-directional LSTM layer."""
    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 batch_first: bool = False, bidirectional: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        self.layer_fw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)
        if self.bidirectional:
            self.layer_bw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        if self.batch_first:
            x = x.transpose(0, 1)
        if hidden is None:
            hx_fw, cx_fw = (None, None)
        else:
            hx_fw, cx_fw = hidden
        hidden_bw: Optional[Tuple[Tensor, Tensor]] = None
        if self.bidirectional:
            if hx_fw is None:
                hx_bw = None
            else:
                hx_bw = hx_fw[1]
                hx_fw = hx_fw[0]
            if cx_fw is None:
                cx_bw = None
            else:
                cx_bw = cx_fw[1]
                cx_fw = cx_fw[0]
            if hx_bw is not None and cx_bw is not None:
                hidden_bw = hx_bw, cx_bw
        if hx_fw is None and cx_fw is None:
            hidden_fw = None
        else:
            hidden_fw = torch.jit._unwrap_optional(hx_fw), torch.jit._unwrap_optional(cx_fw)
        result_fw, hidden_fw = self.layer_fw(x, hidden_fw)

        if hasattr(self, 'layer_bw') and self.bidirectional:
            x_reversed = x.flip(0)
            result_bw, hidden_bw = self.layer_bw(x_reversed, hidden_bw)
            result_bw = result_bw.flip(0)

            result = torch.cat([result_fw, result_bw], result_fw.dim() - 1)
            if hidden_fw is None and hidden_bw is None:
                h = None
                c = None
            elif hidden_fw is None:
                (h, c) = torch.jit._unwrap_optional(hidden_bw)
            elif hidden_bw is None:
                (h, c) = torch.jit._unwrap_optional(hidden_fw)
            else:
                h = torch.stack([hidden_fw[0], hidden_bw[0]], 0)  # type: ignore[list-item]
                c = torch.stack([hidden_fw[1], hidden_bw[1]], 0)  # type: ignore[list-item]
        else:
            result = result_fw
            h, c = torch.jit._unwrap_optional(hidden_fw)  # type: ignore[assignment]

        if self.batch_first:
            result.transpose_(0, 1)

        return result, (h, c)

    @classmethod
    def from_float(cls, other, layer_idx=0, qconfig=None, **kwargs):
        r"""
        There is no FP equivalent of this class. This function is here just to
        mimic the behavior of the `prepare` step within the `torch.ao.quantization`
        flow.
        """
        assert hasattr(other, 'qconfig') or (qconfig is not None)

        input_size = kwargs.get('input_size', other.input_size)
        hidden_size = kwargs.get('hidden_size', other.hidden_size)
        bias = kwargs.get('bias', other.bias)
        batch_first = kwargs.get('batch_first', other.batch_first)
        bidirectional = kwargs.get('bidirectional', other.bidirectional)

        layer = cls(input_size, hidden_size, bias, batch_first, bidirectional)
        layer.qconfig = getattr(other, 'qconfig', qconfig)
        wi = getattr(other, f'weight_ih_l{layer_idx}')
        wh = getattr(other, f'weight_hh_l{layer_idx}')
        bi = getattr(other, f'bias_ih_l{layer_idx}', None)
        bh = getattr(other, f'bias_hh_l{layer_idx}', None)

        layer.layer_fw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)

        if other.bidirectional:
            wi = getattr(other, f'weight_ih_l{layer_idx}_reverse')
            wh = getattr(other, f'weight_hh_l{layer_idx}_reverse')
            bi = getattr(other, f'bias_ih_l{layer_idx}_reverse', None)
            bh = getattr(other, f'bias_hh_l{layer_idx}_reverse', None)
            layer.layer_bw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)
        return layer
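
# Sketch of how a single observable layer is pulled out of a float ``nn.LSTM``
# (the names below are assumptions for illustration, not part of this module):
#
#     float_lstm = torch.nn.LSTM(10, 20, num_layers=2)
#     float_lstm.qconfig = torch.ao.quantization.default_qconfig
#     layer0 = _LSTMLayer.from_float(float_lstm, layer_idx=0)
#
# ``LSTM.from_float`` below does exactly this once per layer index.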

class LSTM(torch.nn.Module):
    r"""A quantizable long short-term memory (LSTM).

    For the description and the argument types, please refer to :class:`~torch.nn.LSTM`

    Attributes:
        layers : instances of the `_LSTMLayer`

    .. note::
        To access the weights and biases, you need to access them per layer.
        See examples below.

    Examples::

        >>> import torch.ao.nn.quantizable as nnqa
        >>> rnn = nnqa.LSTM(10, 20, 2)
        >>> input = torch.randn(5, 3, 10)
        >>> h0 = torch.randn(2, 3, 20)
        >>> c0 = torch.randn(2, 3, 20)
        >>> output, (hn, cn) = rnn(input, (h0, c0))
        >>> # To get the weights:
        >>> # xdoctest: +SKIP
        >>> print(rnn.layers[0].weight_ih)
        tensor([[...]])
        >>> print(rnn.layers[0].weight_hh)
        AssertionError: There is no reverse path in the non-bidirectional layer
    """
    _FLOAT_MODULE = torch.nn.LSTM

    def __init__(self, input_size: int, hidden_size: int,
                 num_layers: int = 1, bias: bool = True,
                 batch_first: bool = False, dropout: float = 0.,
                 bidirectional: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        self.training = False  # We don't want to train using this module
        num_directions = 2 if bidirectional else 1

        if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
                isinstance(dropout, bool):
            raise ValueError("dropout should be a number in range [0, 1] "
                             "representing the probability of an element being "
                             "zeroed")
        if dropout > 0:
            warnings.warn("dropout option for quantizable LSTM is ignored. "
                          "If you are training, please, use nn.LSTM version "
                          "followed by `prepare` step.")
            if num_layers == 1:
                warnings.warn("dropout option adds dropout after all but last "
                              "recurrent layer, so non-zero dropout expects "
                              "num_layers greater than 1, but got dropout={} "
                              "and num_layers={}".format(dropout, num_layers))

        layers = [_LSTMLayer(self.input_size, self.hidden_size,
                             self.bias, batch_first=False,
                             bidirectional=self.bidirectional, **factory_kwargs)]
        for layer in range(1, num_layers):
            layers.append(_LSTMLayer(self.hidden_size, self.hidden_size,
                                     self.bias, batch_first=False,
                                     bidirectional=self.bidirectional,
                                     **factory_kwargs))
        self.layers = torch.nn.ModuleList(layers)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        if self.batch_first:
            x = x.transpose(0, 1)

        max_batch_size = x.size(1)
        num_directions = 2 if self.bidirectional else 1
        if hidden is None:
            zeros = torch.zeros(num_directions, max_batch_size,
                                self.hidden_size, dtype=torch.float,
                                device=x.device)
            zeros.squeeze_(0)
            if x.is_quantized:
                zeros = torch.quantize_per_tensor(zeros, scale=1.0,
                                                  zero_point=0, dtype=x.dtype)
            hxcx = [(zeros, zeros) for _ in range(self.num_layers)]
        else:
            hidden_non_opt = torch.jit._unwrap_optional(hidden)
            if isinstance(hidden_non_opt[0], Tensor):
                hx = hidden_non_opt[0].reshape(self.num_layers, num_directions,
                                               max_batch_size,
                                               self.hidden_size).unbind(0)
                cx = hidden_non_opt[1].reshape(self.num_layers, num_directions,
                                               max_batch_size,
                                               self.hidden_size).unbind(0)
                hxcx = [(hx[idx].squeeze_(0), cx[idx].squeeze_(0)) for idx in range(self.num_layers)]
            else:
                hxcx = hidden_non_opt

        hx_list = []
        cx_list = []
        for idx, layer in enumerate(self.layers):
            x, (h, c) = layer(x, hxcx[idx])
            hx_list.append(torch.jit._unwrap_optional(h))
            cx_list.append(torch.jit._unwrap_optional(c))
        hx_tensor = torch.stack(hx_list)
        cx_tensor = torch.stack(cx_list)

        # We are creating another dimension for bidirectional case
        # need to collapse it
        hx_tensor = hx_tensor.reshape(-1, hx_tensor.shape[-2], hx_tensor.shape[-1])
        cx_tensor = cx_tensor.reshape(-1, cx_tensor.shape[-2], cx_tensor.shape[-1])

        if self.batch_first:
            x = x.transpose(0, 1)

        return x, (hx_tensor, cx_tensor)

    def _get_name(self):
        return 'QuantizableLSTM'

    @classmethod
    def from_float(cls, other, qconfig=None):
        assert isinstance(other, cls._FLOAT_MODULE)
        assert (hasattr(other, 'qconfig') or qconfig)
        observed = cls(other.input_size, other.hidden_size, other.num_layers,
                       other.bias, other.batch_first, other.dropout,
                       other.bidirectional)
        observed.qconfig = getattr(other, 'qconfig', qconfig)
        for idx in range(other.num_layers):
            observed.layers[idx] = _LSTMLayer.from_float(other, idx, qconfig,
                                                         batch_first=False)
        observed.eval()
        observed = torch.ao.quantization.prepare(observed, inplace=True)
        return observed

    @classmethod
    def from_observed(cls, other):
        # The whole flow is float -> observed -> quantized
        # This class does float -> observed only
        raise NotImplementedError("It looks like you are trying to convert a "
                                  "non-quantizable LSTM module. Please, see "
                                  "the examples on quantizable LSTMs.")