# _utils.py

import copyreg
import sys
import traceback
import warnings
from collections import defaultdict
from typing import Any, DefaultDict, List, Optional

import torch

def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.

    If this is already of the correct type, no copy is performed and the
    original object is returned.

    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking("type", non_blocking, kwargs)
    if dtype is None:
        return self.__module__ + "." + self.__class__.__name__

    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self

    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        new_module_name = dtype.__module__.replace(".sparse", "")
        new_values_type_name = new_module_name + "." + dtype.__name__
        new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)
        new_indices_type_name = new_module_name + ".LongTensor"
        new_indices = torch.Tensor._indices(self).type(
            new_indices_type_name, non_blocking
        )
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)
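
# Illustrative use of _type, assuming it is bound as a `.type()` method the way
# torch wires these helpers onto its tensor/storage classes (a sketch, not an
# API guarantee):
#
#   >>> x = torch.zeros(2)
#   >>> x.type()                      # no dtype: fully qualified type name
#   'torch.FloatTensor'
#   >>> x.type("torch.DoubleTensor")  # cast via a dotted type name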

def _cuda(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in CUDA memory.

    If this object is already in CUDA memory and on the correct device, then
    no copy is performed and the original object is returned.

    Args:
        device (int): The destination GPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = _get_async_or_non_blocking("cuda", non_blocking, kwargs)
    if self.is_cuda:
        if device is None:
            device = torch.cuda.current_device()
        if self.get_device() == device:
            return self
    else:
        if device is None:
            device = -1
    with torch.cuda.device(device):
        if self.is_sparse:
            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
            indices = torch.Tensor._indices(self).cuda(device, non_blocking)
            values = torch.Tensor._values(self).cuda(device, non_blocking)
            return new_type(indices, values, self.size())
        else:
            untyped_storage = torch.UntypedStorage(
                self.size(), device=torch.device("cuda")
            )
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage

def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
    """Return the non-blocking flag given the function name and kwargs.

    Args:
        function_name (str): the name of the function being used.
        non_blocking (bool): the default value.
        **kwargs (dict): the kwargs passed to the function.
    """
    if not kwargs:
        return non_blocking
    if len(kwargs) != 1 or "async" not in kwargs:
        message = "{}() got an unexpected keyword argument '{}'"
        argument = list(kwargs.keys()).pop()
        raise TypeError(message.format(function_name, argument))
    warnings.warn("'async' is deprecated; use 'non_blocking'")
    return kwargs["async"]
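
# Illustrative behavior (a sketch; callers pass the raw **kwargs dict through,
# since `async` is a reserved word in Python 3 and can't be a named parameter):
#
#   >>> _get_async_or_non_blocking("cuda", False, {})                # -> False
#   >>> _get_async_or_non_blocking("cuda", False, {"async": True})   # warns, -> True
#   >>> _get_async_or_non_blocking("cuda", False, {"bogus": 1})      # TypeError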

# Note [Don't serialize hooks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Since time immemorial, we have serialized the backward hooks associated with
# variables. This kind of half-worked--Python can pickle global functions
# (but not closures!)--but there were problems.
#
# - It's fragile. If you serialize a backward hook into a saved
#   model, and then you rename the function associated with the hook,
#   now your saved model is broken and you can't load it anymore.
#
# - It's not actually used. The standard recommendation is to
#   serialize the *state_dict* of a model, not the model itself
#   (since this is more stable to code changes affecting the model
#   serialization), and the state dict saves "data" only, thus
#   stripping the backward hooks. In some cases, hooks are
#   essential to the well-functioning of a model (e.g., DDP),
#   but DDP already manages re-adding the hooks!
#
# - We didn't serialize them in many cases. Prior to #10220, we
#   were dropping backward hooks in ForkingPickler. We "fixed" this
#   to be consistent with other serialization sites, but the lack of
#   serialized backward hooks wasn't actually the root cause of
#   the bug.
#
# With these cases in mind, we have decided that a better strategy
# is to just NOT serialize hooks at all.
#
# Since this is a BC-breaking change, we should warn when we previously
# serialized a hook, but no longer do so. This will be done by adding a special
# sentinel property to hooks, which will be used to suppress this warning. If a
# hook has the property _torch_serialize_ignore, we will not emit a warning if
# we attempt to serialize a Tensor with this hook attached to it.
#
# By the way, when _backward_hooks is skipped, we must give an EMPTY
# OrderedDict(); if you pass None you'll run afoul of #12219.

# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_tensor(storage, storage_offset, size, stride):
    # first construct a tensor with the correct dtype/device
    t = torch.tensor([], dtype=storage.dtype, device=storage._untyped_storage.device)
    return t.set_(storage._untyped_storage, storage_offset, size, stride)
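
# Roughly what the unpickler drives through the helper above (an illustrative
# sketch only; the sizes and strides here are arbitrary):
#
#   >>> storage = torch.arange(6.0).storage()        # TypedStorage of 6 floats
#   >>> _rebuild_tensor(storage, 0, (2, 3), (3, 1))  # 2x3 row-major view over it
#   tensor([[0., 1., 2.],
#           [3., 4., 5.]])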

def get_tensor_metadata(tensor):
    # Tensor's Metadata for serializing.
    # Currently, this only returns a dict[string, bool] specifying whether
    # `conj` or `neg` bit is set.
    assert isinstance(tensor, torch.Tensor)
    return torch._C._get_tensor_metadata(tensor)  # type: ignore[attr-defined]


def set_tensor_metadata(tensor, metadata):
    # See `get_tensor_metadata` above
    assert isinstance(metadata, dict)
    assert isinstance(tensor, torch.Tensor)
    torch._C._set_tensor_metadata(tensor, metadata)  # type: ignore[attr-defined]

def _rebuild_tensor_v2(
    storage, storage_offset, size, stride, requires_grad, backward_hooks, metadata=None
):
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    if metadata:
        set_tensor_metadata(tensor, metadata)

    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor

_sparse_tensors_to_validate: List["torch.Tensor"] = []


# In _legacy_load() in serialization.py we unpickle storages after the sparse
# tensors have been already unpickled. Those storages contain data necessary for
# validating sparse tensors: indices and values. That's why sparse tensors are
# first unpickled without any validation, and then this function is called just
# before _legacy_load() returns, so that all the sparse tensors can be validated
# in bulk.
#
# The same procedure must be followed by _load() in serialization.py because due
# to Pickler semantics, we have to use the same (non-validating) function for
# unpickling sparse tensors, regardless of the caller.
def _validate_loaded_sparse_tensors():
    try:
        for t in _sparse_tensors_to_validate:
            if t.layout is torch.sparse_coo:
                torch._validate_sparse_coo_tensor_args(
                    t._indices(), t._values(), t.size()
                )
            elif t.layout in {
                torch.sparse_csr,
                torch.sparse_csc,
                torch.sparse_bsr,
                torch.sparse_bsc,
            }:
                # TODO: Validation currently involves an expensive traversal
                # on CPU, which may include a device transfer.
                if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
                    compressed_indices, plain_indices = (
                        t.crow_indices(),
                        t.col_indices(),
                    )
                else:
                    compressed_indices, plain_indices = (
                        t.ccol_indices(),
                        t.row_indices(),
                    )
                torch._validate_sparse_compressed_tensor_args(
                    compressed_indices, plain_indices, t.values(), t.size(), t.layout
                )
            else:
                raise NotImplementedError(
                    "_validate_loaded_sparse_tensors for layout `%s`" % (t.layout)
                )
    finally:
        _sparse_tensors_to_validate.clear()

def _rebuild_sparse_tensor(layout, data):
    """
    Rebuilds a sparse tensor from its sparse storage representation.

    Args:
        layout (str): The sparse storage layout of the tensor.
        data (tuple): The tensor's sparse storage representation.
    """
    if layout == torch.sparse_coo:
        indices, values, size = data
        result = torch.sparse_coo_tensor(indices, values, size, check_invariants=False)
        _sparse_tensors_to_validate.append(result)
        return result

    elif layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        compressed_indices, plain_indices, values, size = data
        result = torch.sparse_compressed_tensor(
            compressed_indices,
            plain_indices,
            values,
            size,
            layout=layout,
            check_invariants=False,
        )
        _sparse_tensors_to_validate.append(result)
        return result

    raise NotImplementedError("rebuilding sparse tensor for layout %s" % (layout))
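
# Illustrative round trip for the COO branch (a sketch of what deserialization
# drives; users don't call these directly):
#
#   >>> i = torch.tensor([[0, 1], [1, 0]])
#   >>> v = torch.tensor([3.0, 4.0])
#   >>> t = _rebuild_sparse_tensor(torch.sparse_coo, (i, v, (2, 2)))
#   >>> _validate_loaded_sparse_tensors()  # raises if indices/values are inconsistent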

def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad):
    tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


# Should not be used, only here to be able to load Tensors serialized with older versions of pytorch
_rebuild_xla_tensor = _rebuild_device_tensor_from_numpy


def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
    return torch.empty_strided(
        size, stride, dtype=dtype, device="meta", requires_grad=requires_grad
    )
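
# E.g. (a sketch): _rebuild_meta_tensor_no_storage(torch.float32, (2, 3), (3, 1), False)
# yields a 2x3 meta tensor carrying only shape/dtype metadata, with no backing data.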

def _rebuild_wrapper_subclass(
    cls, dtype, size, stride, storage_offset, layout, device, requires_grad
):
    return torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
        cls,
        size,
        strides=stride,
        storage_offset=storage_offset,
        layout=layout,
        device=device,
        requires_grad=requires_grad,
    )

# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_qtensor(
    storage,
    storage_offset,
    size,
    stride,
    quantizer_params,
    requires_grad,
    backward_hooks,
):
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(
            size,
            scale=scale,
            zero_point=zero_point,
            dtype=storage.dtype,
            device=storage.device,
        )
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.long, device=storage.device
                )
            else:
                scales = torch.tensor(scales, dtype=torch.float, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.float, device=storage.device
                )
        tensor = torch._empty_per_channel_affine_quantized(
            size,
            scales=scales,
            zero_points=zero_points,
            axis=axis,
            dtype=storage.dtype,
            device=storage.device,
        )
    else:
        raise RuntimeError(
            "Can't deserialize quantized tensor with qscheme {}".format(qscheme)
        )
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor

def _rebuild_parameter(data, requires_grad, backward_hooks):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks
    return param


def _rebuild_parameter_with_state(data, requires_grad, backward_hooks, state):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks

    # Restore state on Parameter like python attr.
    param = _set_obj_state(param, state)
    return param

def _get_obj_state(obj):
    # Get the state of the python subclass
    # This loosely mimics the function on the object class but since Tensor does
    # not inherit from it, we cannot call that function directly
    # https://github.com/python/cpython/blob/c83919bd635f4433f1c6ae8504996a9fe3c215e5/Objects/typeobject.c#L4891
    # Note that starting with Python 3.11, this `__getstate__` is always defined and thus
    # the else branch will never be taken.
    getstate_fn = getattr(obj, "__getstate__", None)
    if getstate_fn:
        state = getstate_fn()
    else:
        slots_to_save = copyreg._slotnames(obj.__class__)  # type: ignore[attr-defined]
        if slots_to_save:
            state = (
                obj.__dict__,
                {
                    name: getattr(obj, name)
                    for name in slots_to_save
                    if hasattr(obj, name)
                },
            )
        else:
            state = obj.__dict__

    return state

def _set_obj_state(obj, state):
    if isinstance(state, tuple):
        if not len(state) == 2:
            raise RuntimeError(f"Invalid serialized state: {state}")
        dict_state = state[0]
        slots_state = state[1]
    else:
        dict_state = state
        slots_state = None

    # Starting with Python 3.11, the __dict__ attribute is lazily created
    # and is serialized as None when not needed.
    if dict_state:
        for k, v in dict_state.items():
            setattr(obj, k, v)

    if slots_state:
        for k, v in slots_state.items():
            setattr(obj, k, v)

    return obj
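
# Minimal sketch of _set_obj_state with a plain dict state (the attribute name
# is hypothetical):
#
#   >>> p = torch.nn.Parameter(torch.zeros(2))
#   >>> p = _set_obj_state(p, {"scale": 0.5})
#   >>> p.scale
#   0.5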

def _import_dotted_name(name):
    components = name.split(".")
    obj = __import__(components[0])
    for component in components[1:]:
        obj = getattr(obj, component)
    return obj
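
# E.g. _import_dotted_name("torch.FloatTensor") imports "torch" and then walks
# the attribute chain, returning the torch.FloatTensor class.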

# Taken from python 3.5 docs
def _accumulate(iterable, fn=lambda x, y: x + y):
    "Return running totals"
    # _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
    # _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
    it = iter(iterable)
    try:
        total = next(it)
    except StopIteration:
        return
    yield total
    for element in it:
        total = fn(total, element)
        yield total

def _flatten_dense_tensors(tensors):
    """Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
    same dense type.

    Since inputs are dense, the resulting tensor will be a concatenated 1D
    buffer. Element-wise operation on this buffer will be equivalent to
    operating individually.

    Args:
        tensors (Iterable[Tensor]): dense tensors to flatten.

    Returns:
        A contiguous 1D buffer containing input tensors.
    """
    return torch._C._nn.flatten_dense_tensors(tensors)

def _flatten_sparse_tensors(tensors):
    """Flatten sparse tensors into two contiguous 1D buffers, one of indices and
    one of values. Assume tensors are of same sparse type.

    Args:
        tensors (Iterable[Tensor]): sparse tensors to flatten.

    Returns:
        A tuple of two contiguous 1D buffers, one containing input tensors'
        indices and the other containing the values.
    """
    flat_indices = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._indices(t) for t in tensors]
    )
    flat_values = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._values(t) for t in tensors]
    )
    return flat_indices, flat_values

def _unflatten_dense_tensors(flat, tensors):
    """View a flat buffer using the sizes of tensors. Assume that tensors are of
    same dense type, and that flat is given by _flatten_dense_tensors.

    Args:
        flat (Tensor): flattened dense tensors to unflatten.
        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
            unflatten flat.

    Returns:
        Unflattened dense tensors with sizes same as tensors and values from
        flat.
    """
    return torch._C._nn.unflatten_dense_tensors(flat, tensors)
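
# Illustrative flatten/unflatten round trip (a sketch):
#
#   >>> a, b = torch.zeros(2, 2), torch.ones(3)
#   >>> flat = _flatten_dense_tensors([a, b])           # 1D buffer of 7 elements
#   >>> a2, b2 = _unflatten_dense_tensors(flat, [a, b])
#   >>> a2.shape, b2.shape
#   (torch.Size([2, 2]), torch.Size([3]))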

def _unflatten_sparse_tensors(flat, tensors):
    """View flat buffer (containing indices and values) using the sizes of
    tensors. Assume that tensors are of same sparse type, and that flat is given
    by _flatten_sparse_tensors.

    Args:
        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
            tensors to unflatten.
        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
            unflatten flat.

    Returns:
        Unflattened sparse tensors with sizes same as tensors and values from
        flat.
    """
    flat_indices, flat_values = flat
    indices = torch._C._nn.unflatten_dense_tensors(
        flat_indices, [torch.Tensor._indices(t) for t in tensors]
    )
    values = torch._C._nn.unflatten_dense_tensors(
        flat_values, [torch.Tensor._values(t) for t in tensors]
    )
    outputs = []
    for t, i, v in zip(tensors, indices, values):
        outputs.append(t.new(i, v, t.size()))
    return tuple(outputs)

def _reorder_tensors_as(tensors, ordered_tensors):
    """Assume that tensors are of same order as ordered_tensors within their
    types, e.g., from _take_tensors. Reorder them to be of same order as
    ordered_tensors.

    Args:
        tensors (Iterable[Tensor]): tensors to be reordered. They should be of
            the same order as ordered_tensors within their own types.
        ordered_tensors (Iterable[Tensor]): tensors whose order will be the
            reference.

    Returns:
        Ordered tuple of tensors with contents from tensors and order of
        ordered_tensors.
    """
    type_dict = defaultdict(list)
    for tensor in tensors:
        type_dict[tensor.type()].append(tensor)
    type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
    return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)
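
# E.g. (a sketch): with two float tensors f1, f2 and a double tensor d, tensors
# grouped by type are fanned back out to follow the reference interleaving:
#
#   >>> f1, f2 = torch.zeros(1), torch.ones(1)
#   >>> d = torch.zeros(1, dtype=torch.float64)
#   >>> _reorder_tensors_as([f1, f2, d], [f1, d, f2])   # -> (f1, d, f2)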

def _take_tensors(tensors, size_limit):
    """Group tensors into chunks. This generator yields one chunk at a time,
    each containing tensors of the same type, up to a certain byte limit in
    total size.

    Args:
        tensors (Sequence): A sequence of tensors to be separated into chunks.
        size_limit (int): The limit of each chunk in bytes.

    Yields:
        Blocks of tensors of same type and within size_limit. The yielded
        tensors are only ordered as the original sequence within its types.
    """
    buf_dict: DefaultDict[str, List] = defaultdict(lambda: [[], 0])
    for tensor in tensors:
        t = tensor.type()
        if tensor.is_sparse:
            indices = torch.Tensor._indices(tensor)
            values = torch.Tensor._values(tensor)
            size = (
                indices.numel() * indices.element_size()
                + values.numel() * values.element_size()
            )
        else:
            size = tensor.numel() * tensor.element_size()
        buf_and_size = buf_dict[t]
        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
            yield buf_and_size[0]
            buf_and_size = buf_dict[t] = [[], 0]
        buf_and_size[0].append(tensor)
        buf_and_size[1] += size
    for buf, _ in buf_dict.values():
        if len(buf) > 0:
            yield buf
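
# Illustrative chunking (a sketch): four 1 KB float tensors with a 2 KB limit
# come out as two chunks of two tensors each.
#
#   >>> ts = [torch.zeros(256) for _ in range(4)]   # 256 * 4 bytes = 1 KB each
#   >>> [len(chunk) for chunk in _take_tensors(ts, 2048)]
#   [2, 2]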

# annotation decorator to get annotations in a way that is compatible
# with both Python 2 and 3
def annotate(ret, **kwargs):
    def dec(fun):
        fun.__annotations__ = dict(kwargs)
        fun.__annotations__["return"] = ret
        return fun

    return dec
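
# Illustrative use (a sketch):
#
#   >>> @annotate(int, x=int, y=int)
#   ... def add(x, y):
#   ...     return x + y
#   >>> add.__annotations__
#   {'x': <class 'int'>, 'y': <class 'int'>, 'return': <class 'int'>}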

# NOTE [ Python Traceback Reference Cycle Problem ]
#
# When using sys.exc_info(), it is important to **not** store the exc_info[2],
# which is the traceback, because otherwise you will run into the traceback
# reference cycle problem, i.e., the traceback holding a reference to the frame,
# and the frame (which holds references to all the objects in its temporary
# scope) holding a reference to the traceback.


class KeyErrorMessage(str):
    r"""str subclass that returns itself in repr"""

    def __repr__(self):
        return self

class ExceptionWrapper:
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info=None, where="in background"):
        # It is important that we don't store exc_info, see
        # NOTE [ Python Traceback Reference Cycle Problem ]
        if exc_info is None:
            exc_info = sys.exc_info()
        self.exc_type = exc_info[0]
        self.exc_msg = "".join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
        r"""Reraises the wrapped exception in the current thread"""
        # Format a message such as: "Caught ValueError in DataLoader worker
        # process 2. Original Traceback:", followed by the traceback.
        msg = "Caught {} {}.\nOriginal {}".format(
            self.exc_type.__name__, self.where, self.exc_msg
        )
        if self.exc_type == KeyError:
            # KeyError calls repr() on its argument (usually a dict key). This
            # makes stack traces unreadable. It will not be changed in Python
            # (https://bugs.python.org/issue2651), so we work around it.
            msg = KeyErrorMessage(msg)
        elif getattr(self.exc_type, "message", None):
            # Some exceptions have first argument as non-str but explicitly
            # have message field
            raise self.exc_type(message=msg)
        try:
            exception = self.exc_type(msg)
        except TypeError:
            # If the exception takes multiple arguments, don't try to
            # instantiate since we don't know how to
            raise RuntimeError(msg) from None
        raise exception
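
# Typical cross-thread pattern (a sketch): the worker catches and wraps, the
# consumer reraises with the original traceback baked into the message.
#
#   >>> results = []
#   >>> def worker():
#   ...     try:
#   ...         raise ValueError("boom")
#   ...     except Exception:
#   ...         results.append(ExceptionWrapper(where="in worker thread"))
#   >>> worker()
#   >>> results[0].reraise()   # ValueError: Caught ValueError in worker thread. ...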

def _get_available_device_type():
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch, "xpu") and torch.xpu.is_available():  # type: ignore[attr-defined]
        return "xpu"
    # add more available device types here
    return None


def _get_device_attr(get_member):
    device_type = _get_available_device_type()
    if device_type and device_type.lower() == "cuda":
        return get_member(torch.cuda)
    if device_type and device_type.lower() == "xpu":
        return get_member(torch.xpu)  # type: ignore[attr-defined]
    # add more available device types here
    return None


def _get_current_device_index():
    # current device index
    return _get_device_attr(lambda m: m.current_device())


def _get_all_device_indices():
    # all device indices
    return _get_device_attr(lambda m: list(range(m.device_count())))


def _get_devices_properties(device_ids):
    # all device properties
    return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]

def get_current_device_index() -> int:
    r"""Checks if there are CUDA devices available and
    returns the device index of the current default CUDA device.
    Returns -1 in case there are no CUDA devices available.

    Arguments: ``None``
    """
    if torch.cuda.device_count() > 0:
        return torch.cuda.current_device()
    return -1

def _get_device_index(
    device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    has an index. Note that for a device without a specified index,
    i.e., ``torch.device('xxx')``, this will return the current default
    device of that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default
    device of the supported runtime platform if :attr:`optional` is ``True``,
    e.g., the current default CUDA device will be returned if the CUDA runtime
    is supported.
    """
    if isinstance(device, str):
        device = torch.device(device)
    device_idx: Optional[int] = None
    if isinstance(device, torch.device):
        if not allow_cpu and device.type == "cpu":
            raise ValueError("Expected a non cpu device, but got: {}".format(device))
        device_idx = -1 if device.type == "cpu" else device.index
    if isinstance(device, int):
        device_idx = device
    if device_idx is None:
        if optional:
            # The eager API _get_current_device_index uses `lambda` functions which are
            # not supported in JIT and hence not scriptable. The JIT equivalent API to get
            # the current device index is `get_current_device_index()` which can
            # be scripted. We use is_scripting to check the mode we are in and call the
            # appropriate API.
            if torch.jit.is_scripting():
                device_idx = get_current_device_index()
            else:
                device_idx = _get_current_device_index()
        else:
            raise ValueError(
                "Expected a torch.device with a specified index "
                "or an integer, but got: {}".format(device)
            )
    return device_idx
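
# Illustrative results (a sketch; the optional/None case assumes a CUDA build):
#
#   >>> _get_device_index(torch.device("cuda:1"))                # -> 1
#   >>> _get_device_index(3)                                     # -> 3
#   >>> _get_device_index(torch.device("cpu"), allow_cpu=True)   # -> -1
#   >>> _get_device_index(None, optional=True)                   # current default device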

def _handle_complex(tensor):
    """
    Returns a real view of a tensor if its dtype is complex, else the tensor
    itself. We must check for UninitializedParameter first because calling
    is_complex() on a LazyModule's uninitialized parameter is an error.
    """
    return (
        torch.view_as_real(tensor)
        if not isinstance(tensor, torch.nn.UninitializedParameter)
        and tensor.is_complex()
        else tensor
    )

def _element_size(dtype):
    """
    Returns the element size for a dtype, in bytes
    """
    if not isinstance(dtype, torch.dtype):
        raise RuntimeError(f"expected torch.dtype, but got {type(dtype)}")

    if dtype.is_complex:
        return torch.finfo(dtype).bits >> 2
    elif dtype.is_floating_point:
        return torch.finfo(dtype).bits >> 3
    elif dtype == torch.bool:
        # NOTE: torch.bool is not supported in torch.iinfo()
        return 1
    else:
        return torch.iinfo(dtype).bits >> 3
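
# E.g. (following the bit widths above):
#
#   >>> _element_size(torch.float32)    # 32 bits >> 3 -> 4
#   >>> _element_size(torch.complex64)  # finfo reports component bits (32) >> 2 -> 8
#   >>> _element_size(torch.int64)      # 64 bits >> 3 -> 8
#   >>> _element_size(torch.bool)       # special-cased -> 1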

class _ClassPropertyDescriptor:
    def __init__(self, fget, fset=None):
        self.fget = fget

    def __get__(self, instance, owner=None):
        if owner is None:
            owner = type(instance)
        return self.fget.__get__(instance, owner)()


def classproperty(func):
    if not isinstance(func, (classmethod, staticmethod)):
        func = classmethod(func)
    return _ClassPropertyDescriptor(func)
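
# Illustrative use (a sketch): the decorated function behaves like a read-only
# property on the class itself.
#
#   >>> class Config:
#   ...     @classproperty
#   ...     def name(cls):
#   ...         return cls.__name__
#   >>> Config.name
#   'Config'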

# Whether we are compiling with torch.compile or not
def is_compiling():
    return False