# -*- coding: utf-8 -*-

import warnings

# A workaround to support both TorchScript and MyPy:
from typing import Any, List, Optional, Tuple, TYPE_CHECKING, Union

import torch
from torch import Tensor
from torch.masked import as_masked_tensor, is_masked_tensor, MaskedTensor

from . import _docs

if TYPE_CHECKING:
    from torch.types import _dtype as DType

    DimOrDims = Optional[Union[int, Tuple[int], List[int]]]
else:
    # The JIT doesn't understand Union, nor torch.dtype here
    DType = int
    DimOrDims = Optional[Tuple[int]]


__all__ = []

# All masked reduction/normalization operations have the same
# signatures. Here we introduce docstring templates that are applied
# to docstrings of reduction/normalization functions via
# _apply_docstring_templates decorator.


def _apply_docstring_templates(func):
    """Decorator that applies docstring templates to function docstring
    and returns the function instance.
    """
    doc_string = getattr(_docs, f"{func.__name__}_docstring", None)
    if doc_string is None:
        warnings.warn(
            f"No documentation string available for {func.__name__}."
            " PyTorch team should run `python tools/update_masked_docs.py`"
            " to generate the missing docstrings."
        )
    else:
        func.__doc__ = doc_string

    # Expose function as public symbol
    __all__.append(func.__name__)

    return func


def _generate_docstring(func):
    """A utility function called from the tools/update_masked_docs.py
    script to update the module torch.masked._docs.py
    """
    docstring_templates = dict(
        reduction_signature="""\
{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""",
        reduction_descr="""\
Returns {operation name} of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.""",
        reduction_args="""\
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).

The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in {operation name} computation, otherwise the element is
ignored.

When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have an undefined value: it may or may not
correspond to the identity value of {operation name} operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.

The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.

The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than that of the :attr:`input` tensor.

Args:
    input (Tensor): the input tensor
    {args_declarations}

Keyword args:
    {kwargs_declarations}""",
        reduction_example="""\
Example::

    >>> input = {example_input}
    >>> input
    {indent_example_input}
    >>> mask = {example_mask}
    >>> mask
    {indent_example_mask}
    >>> {full_function_name}(input, {example_args}, mask=mask)
    {indent_example_output}
""",
        reduction_identity="""\
The identity value of {operation name} operation, which is used to start the reduction, is ``{identity_int32}``.""",
        reduction_identity_dtype="""\
The identity value of {operation name} operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``{identity_float32}``, ``{identity_uint8}``, and ``{identity_int32}``, respectively.""",
        normalization_signature="""\
{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""",
        normalization_descr="""\
Returns {operation name} of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.

{definition}""",
        normalization_args="""\
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
{operation name} computation, otherwise the element is ignored.

The values of masked-out elements of the output tensor are undefined:
they may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.

The mask of the {operation name} output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.

The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than that of the :attr:`input` tensor.

Args:
    input (Tensor): the input tensor
    {args_declarations}

Keyword args:
    {kwargs_declarations}""",
        normalization_example="""\
Example::

    >>> input = {example_input}
    >>> input
    {indent_example_input}
    >>> mask = {example_mask}
    >>> mask
    {indent_example_mask}
    >>> {full_function_name}(input, {example_args}, mask=mask)
    {indent_example_output}
""",
    )
    args_and_kwargs = dict(
        # argument name suffixes separated by double underscore will
        # be removed in the final documentation string.
        sum=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        prod=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        cumsum=(("dim__as_int",), ("dtype=None", "mask=None")),
        cumprod=(("dim__as_int",), ("dtype=None", "mask=None")),
        amin=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        amax=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        argmin=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
        argmax=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
        mean=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        median=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
        norm=(
            (
                "ord",
                "dim",
            ),
            ("keepdim=False", "dtype=None", "mask=None"),
        ),
        var=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")),
        std=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")),
        logsumexp=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
        log_softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
        softmin=(("dim__as_int",), ("dtype=None", "mask=None")),
        normalize=(
            (
                "ord__required",
                "dim__as_int",
            ),
            ("eps=1e-12", "dtype=None", "mask=None"),
        ),
    )
    argument_declarations = dict(
        dim="""\
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
  Default: None that is equivalent to ``tuple(range(input.ndim))``.""",
        dim__as_int="""\
dim (int): the dimension along which {operation name} is computed.""",
        ord="""\
ord (int, float, optional): the order of vector norm. Default: 2.
  See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
        ord__required="""\
ord (int, float): the order of vector norm.
  See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
        unbiased="""\
unbiased (bool): when True, use Bessel’s correction, otherwise, compute
  the uncorrected sample variance.""",
        eps="""\
eps (float, optional): small value to avoid division by zero. Default: {default}.""",
        keepdim="""\
keepdim (bool, optional): whether the output tensor has
  :attr:`dim` retained or not. Default: {default}.""",
        dtype="""\
dtype (:class:`torch.dtype`, optional): the desired data type
  of returned tensor. If specified, the input tensor is
  cast to :attr:`dtype` before the operation is
  performed. Default: {default}.""",
        mask="""\
mask (:class:`torch.Tensor`, optional): the boolean tensor
  containing the binary mask of validity of input tensor
  elements.
  Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.""",
    )
    definitions = dict(
        softmax="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmax of i-th element in ``x`` is
defined as ``exp(x[i])/sum(exp(x))``.""",
        log_softmax="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is
defined as ``log(exp(x[i])/sum(exp(x)))``.""",
        softmin="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmin of i-th element in ``x`` is
defined as ``exp(-x[i])/sum(exp(-x))``.""",
        normalize="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Normalize of i-th element in ``x`` is
defined as ``x[i]/max(norm(x, p), eps)``.""",
        cumsum="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is
defined as ``sum(x[:i])``.""",
        cumprod="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumprod of i-th element in ``x`` is
defined as ``prod(x[:i])``.""",
    )
    reduction_names = dict(
        sum="sum",
        prod="product",
        amax="maximum",
        amin="minimum",
        argmax="argmax",
        argmin="argmin",
        mean="mean",
        median="median",
        norm="norm",
        var="variance",
        std="standard_deviation",
        logsumexp="logsumexp",
    )

    normalization_names = dict(
        softmax="softmax",
        log_softmax="log_softmax",
        softmin="softmin",
        normalize="normalize",
        cumsum="cumulative_sum",
        cumprod="cumulative_prod",
    )

    operation_names = {}
    operation_names.update(reduction_names)
    operation_names.update(normalization_names)

    # Default example data:
    example_dim = 1
    example_input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
    example_mask = torch.tensor([[True, False, True], [False, False, False]])
    example_args: Tuple[Any, ...]
    if func.__name__ in {"norm", "normalize"}:
        example_args = (2.0, example_dim)
        example_input = example_input.to(dtype=torch.float32)
    elif func.__name__ in {"var", "std"}:
        example_args = (example_dim, False)
    elif func.__name__ == "median":
        example_args = (example_dim,)
        example_input = example_input.to(dtype=torch.float32)
    else:
        example_args = (example_dim,)

    operation_args: Tuple[str, ...]
    operation_kwargs: Tuple[str, ...]
    operation_args, operation_kwargs = args_and_kwargs[func.__name__]
    arg_declarations = [
        "\n    ".join(
            argument_declarations.get(a, f'{a.split("__", 1)[0]}: TBD.').splitlines()
        )
        for a in operation_args
    ]
    kwarg_declarations = [
        "\n    ".join(
            argument_declarations.get(
                a.split("=", 1)[0], f'{a.split("__", 1)[0]}: TBD.'
            )
            .format(default=a.split("=", 1)[1])
            .splitlines()
        )
        for a in operation_kwargs
    ]

    if func.__name__ in reduction_names:
        op_kind = "reduction"
        doc_sections = ["signature", "descr", "identity", "args", "example"]
    elif func.__name__ in normalization_names:
        op_kind = "normalization"
        doc_sections = ["signature", "descr", "args", "example"]
        example_input = example_input.to(dtype=torch.float32)
    else:
        assert 0  # add function name to operation names dictionaries
    example_output = func(example_input, *example_args, mask=example_mask)

    template_data = {
        "function_name": func.__name__,
        "full_function_name": func.__module__ + "." + func.__name__,
        "operation name": operation_names[func.__name__],
        "operation_args": ", ".join(a.split("__", 1)[0] for a in operation_args),
        "operation_kwargs": ", ".join(a.split("__", 1)[0] for a in operation_kwargs),
        # one-line representation of a tensor:
        "example_input": " ".join(str(example_input).split()),
        "example_args": ", ".join(map(str, example_args)),
        "example_mask": " ".join(str(example_mask).split()),
        # multi-line representation of a tensor with indent
        "indent_example_input": ("\n    ").join(str(example_input).splitlines()),
        "indent_example_mask": ("\n    ").join(str(example_mask).splitlines()),
        "indent_example_output": ("\n    ").join(str(example_output).splitlines()),
    }

    if func.__name__ in reduction_names:
        template_data.update(
            identity_uint8=_reduction_identity(
                func.__name__, torch.tensor(0, dtype=torch.uint8)
            ),
            identity_int32=_reduction_identity(
                func.__name__, torch.tensor(0, dtype=torch.int32)
            ),
            identity_float32=_reduction_identity(
                func.__name__, torch.tensor(0, dtype=torch.float32)
            ),
        )
        if func.__name__ == "norm":
            template_data.update(
                identity_ord_ninf=_reduction_identity(
                    func.__name__, torch.tensor(0, dtype=torch.float32), float("-inf")
                )
            )
    elif func.__name__ in normalization_names:
        template_data.update(definition=definitions[func.__name__])
    else:
        assert 0  # add function name to operation names dictionaries

    template_data.update(
        args_declarations=("\n    ".join(arg_declarations)).format_map(template_data)
    )
    template_data.update(
        kwargs_declarations=("\n    ".join(kwarg_declarations)).format_map(
            template_data
        )
    )

    # Apply function name info to docstring templates:
    templates = {
        k: v.format_map(template_data)
        for k, v in docstring_templates.items()
        if k.startswith(op_kind)
    }
    templates.update(
        (k, v.format_map(template_data) if isinstance(v, str) else v)
        for k, v in template_data.items()
    )

    # Apply docstring templates to function docstring:
    if func.__doc__ is None:
        doc_template = "\n\n".join([f"{{{op_kind}_{sec}}}" for sec in doc_sections])
    else:
        doc_template = func.__doc__
    return doc_template.format_map(templates)


def _reduction_identity(op_name: str, input: Tensor, *args):
    """Return identity value as scalar tensor of a reduction operation on
    given input, or None, if the identity value cannot be uniquely
    defined for the given input.

    The identity value of the operation is defined as the initial
    value of the reduction that satisfies ``op(op_identity, value) ==
    value`` for any value in the domain of the operation. Put another
    way, including or excluding the identity value in a list of
    operands will not change the reduction result.

    See https://github.com/pytorch/rfcs/pull/27 for more information.
    """
    dtype: DType = input.dtype
    device = input.device
    op_name = op_name.rsplit(".", 1)[-1]  # lstrip module name when present
    if op_name in {"sum", "cumsum"}:
        return torch.tensor(0, dtype=dtype, device=device)
    elif op_name in {"prod", "cumprod"}:
        return torch.tensor(1, dtype=dtype, device=device)
    elif op_name in {"amax", "argmax", "logsumexp"}:
        if torch.is_floating_point(input):
            return torch.tensor(-torch.inf, dtype=dtype, device=device)
        elif torch.is_signed(input) or dtype == torch.uint8:
            return torch.tensor(torch.iinfo(dtype).min, dtype=dtype, device=device)
    elif op_name in {"amin", "argmin"}:
        if torch.is_floating_point(input):
            return torch.tensor(torch.inf, dtype=dtype, device=device)
        elif torch.is_signed(input) or dtype == torch.uint8:
            return torch.tensor(torch.iinfo(dtype).max, dtype=dtype, device=device)
    elif op_name == "mean":
        # Strictly speaking, the identity value of the mean operation
        # is the mean of the input. Since the mean value depends on
        # the dim argument and it may be a non-scalar tensor, we
        # consider the identity value of the mean operation ambiguous.
        # Moreover, the mean value of empty input is undefined.
        return None
    elif op_name == "norm":
        ord = args[0] if args else 2
        if ord == float("-inf"):
            assert torch.is_floating_point(input), input.dtype
            return torch.tensor(torch.inf, dtype=dtype, device=device)
        return torch.tensor(0, dtype=dtype, device=device)
    elif op_name == "median":
        # We use NaN for now because the implementation is currently using torch.nanmedian
        # and NaN is the identity for that function since it gets ignored
        dtype = input.dtype if torch.is_floating_point(input) else torch.float
        return torch.tensor(torch.nan, dtype=dtype, device=device)
    elif op_name in {"var", "std"}:
        return None
    raise NotImplementedError(f"identity of {op_name} on {dtype} input")
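

# A minimal sketch of _reduction_identity behavior (illustrative comment
# only; the values follow from the branches above):
#
#   >>> _reduction_identity("sum", torch.tensor([1, 2]))
#   tensor(0)
#   >>> _reduction_identity("amax", torch.tensor([1.0, 2.0]))
#   tensor(-inf)
#   >>> _reduction_identity("mean", torch.tensor([1.0, 2.0])) is None
#   True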


def _canonical_dim(dim: DimOrDims, ndim: int) -> Tuple[int, ...]:
    """Return dim argument as a tuple of sorted dim values."""
    dims: List[int] = []
    if dim == ():
        # Currently, `dim=()` in reduction operations means "reduce
        # over all dimensions" while in future, it will read "no
        # reduce". See https://github.com/pytorch/pytorch/issues/29137
        # When gh-29137 is resolved, this if-block must be deleted.
        dim = None
    if dim is None:
        return tuple(range(ndim))
    ndim = max(ndim, 1)
    dim_ = (dim,) if isinstance(dim, int) else dim
    for d in dim_:
        if d in dims:
            raise RuntimeError(f"dim={d} appears multiple times in the list of dims")
        if d >= ndim or d < -ndim:
            raise IndexError(
                f"Dimension out of range (expected to be in range of [{-ndim}, {ndim-1}], but got {d})"
            )
        dims.append(d % ndim)
    return tuple(sorted(dims))
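

# A minimal sketch of _canonical_dim semantics (illustrative comment only):
#
#   >>> _canonical_dim(None, 3)     # None selects all dimensions
#   (0, 1, 2)
#   >>> _canonical_dim(-1, 3)       # negative dims are wrapped
#   (2,)
#   >>> _canonical_dim((1, -3), 3)  # result is always sorted
#   (0, 1)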


def _sparse_coo_flatten_indices(indices: Tensor, shape: tuple):
    # Flatten N-D indices to 1-D indices
    flat_indices = indices.new_zeros(indices.size(1))
    for d, sz in enumerate(shape):
        flat_indices.mul_(sz)
        flat_indices.add_(indices[d])
    return flat_indices
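

# Illustrative comment: _sparse_coo_flatten_indices performs row-major
# flattening. For indices [[0, 1], [2, 0]] of a tensor with sparse shape
# (2, 3), the flat indices are 0 * 3 + 2 = 2 and 1 * 3 + 0 = 3:
#
#   >>> _sparse_coo_flatten_indices(torch.tensor([[0, 1], [2, 0]]), (2, 3))
#   tensor([2, 3])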


def _any(input: Tensor, dim: tuple, keepdim: bool):
    # Support torch.any with tuple dim argument.
    # Workaround of https://github.com/pytorch/pytorch/issues/56586
    r = input
    for d in reversed(dim):
        r = r.any(dim=d, keepdim=keepdim)
    return r
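

# Illustrative comment: _any reduces the given dims one at a time, from the
# last to the first, so a tuple dim behaves like torch.any applied over
# several dimensions:
#
#   >>> _any(torch.tensor([[True, False], [False, False]]), (0, 1), False)
#   tensor(True)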


def _sparse_coo_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    """Sparse variant of torch.where. Supports sparse COO and hybrid sparse COO tensors.

    _sparse_coo_where implements the following invariant:

      _sparse_coo_where(mask, input, fill_value).to_dense(fill_value) ==
        torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value))

    where `a == b` means `assertEqual(a, b)`, mask is a boolean sparse
    tensor, and `to_dense(fill_value)` is like `to_dense()` except
    that the unspecified elements are mapped to `fill_value` rather
    than to `0`.

    Returns a sparse COO tensor with the following features:

    - all specified elements correspond to masked-in elements that
      have the values of the input tensor. If there exists a masked-in
      element (as specified by mask) that is not specified in the
      input, then the corresponding element in the result tensor has
      value 0. In the dense part of the sparse tensor, the masked-out
      elements are replaced with fill_value.
    - all unspecified elements correspond to masked-out elements.
    """

    assert input.layout == torch.sparse_coo
    assert mask.layout == input.layout
    assert mask.shape == input.shape
    assert mask.dense_dim() == input.dense_dim()  # TODO: eliminate this restriction

    input = input.coalesce()

    # For set operations on sparse tensor indices, we'll convert
    # multi-dimensional indices to 1-D indices for efficiency.
    input_flat_indices = _sparse_coo_flatten_indices(
        input.indices(), input.shape[: input.sparse_dim()]
    )
    mask_flat_indices = _sparse_coo_flatten_indices(
        mask.indices(), mask.shape[: mask.sparse_dim()]
    )

    # the set of mask flat indices that define masked-in elements:
    if mask.dense_dim() > 0:
        mask_values = _any(
            mask.values(), tuple(range(1, input.sparse_dim() + 1)), False
        )
    else:
        mask_values = mask.values()
    maskin_flat_indices = mask_flat_indices[mask_values.nonzero()[:, 0]]

    def intersection(i1, i2):
        union, counts = torch.cat([i1, i2]).unique(return_counts=True)
        return union, torch.where(counts.gt(1))

    def minus(i1, i2):
        union, counts = torch.cat([i1, i2]).unique(return_counts=True)
        return intersection(union[torch.where(counts.eq(1))], i1)

    def _apply(a):
        obj, w = a
        return obj[w]

    # the set of input flat indices of specified and masked-in elements:
    maskin_input_flat_indices = _apply(
        intersection(maskin_flat_indices, input_flat_indices)
    )
    _, w = intersection(input_flat_indices, maskin_input_flat_indices)

    # the indices and values of masked-in elements
    where_input_indices = input.indices()[(slice(None),) + w]
    where_input_values = input.values()[w]

    if mask.dense_dim() > 0:
        # apply mask to the dense part of the input values:
        _, w1 = intersection(mask_flat_indices, maskin_input_flat_indices)
        where_mask_values = mask.values()[w1]
        where_input_values = torch.where(
            where_mask_values, where_input_values, fill_value
        )

    # the set of flat indices of unspecified input and masked-in elements:
    maskin_zero_flat_indices = _apply(
        minus(maskin_flat_indices, maskin_input_flat_indices)
    )

    # the indices of masked-in zero elements
    _, w = intersection(mask_flat_indices, maskin_zero_flat_indices)
    where_zero_indices = mask.indices()[(slice(None),) + w]

    # construct result
    n = where_zero_indices.size(1)
    if n == 0:
        # the input is coalesced, hence input_flat_indices are ordered
        # and the result is guaranteed to be coalesced:
        result = torch.sparse_coo_tensor(
            where_input_indices, where_input_values, input.shape
        )
        return result._coalesced_(True)

    where_indices = torch.cat([where_input_indices, where_zero_indices], dim=1)
    where_values = torch.cat(
        [
            where_input_values,
            where_input_values.new_zeros((n,) + where_input_values.shape[1:]),
        ]
    )
    result = torch.sparse_coo_tensor(where_indices, where_values, input.shape)

    # appending zero elements leads to uncoalesced sparse tensor
    return result.coalesce()
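

# A minimal sketch of the _sparse_coo_where invariant (illustrative comment
# only; the tensor values are assumptions chosen for the example):
#
#   >>> input = torch.tensor([[1.0, 0.0], [0.0, 2.0]]).to_sparse()
#   >>> mask = torch.tensor([[True, False], [True, True]]).to_sparse()
#   >>> _sparse_coo_where(mask, input, torch.tensor(0.0)).to_dense()
#   tensor([[1., 0.],
#           [0., 2.]])
#
# The masked-in specified elements 1.0 and 2.0 are kept, the masked-in
# unspecified element at (1, 0) becomes an explicit zero, and the
# masked-out element at (0, 1) stays unspecified.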


def _sparse_coo_scatter_reduction_helper(
    op,
    mask_input: Tensor,
    dims: Tuple[int, ...],
    keepdim: bool,
    dtype: Optional[DType] = None,
) -> Tensor:
    reduce = op.__name__
    valid_reductions = ["sum", "prod", "amax", "amin"]
    if reduce not in valid_reductions:
        raise ValueError(
            f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead"
        )

    output_dtype = dtype
    values, indices = mask_input._values(), mask_input._indices()
    input_dims = mask_input.dim()
    num_sparse_dims = mask_input.sparse_dim()
    reduced_sparse_dims = []
    retained_sparse_dims = []
    reduced_dense_dims = []

    # promote dtype if specified
    if values.dtype != output_dtype:
        values = values.to(output_dtype)

    if keepdim:
        output_shape = tuple(
            1 if i in dims else si for (i, si) in enumerate(mask_input.shape)
        )
    else:
        output_shape = tuple(
            si for (i, si) in enumerate(mask_input.shape) if i not in dims
        )

    for d in dims:
        if d >= input_dims:
            continue
        if d < num_sparse_dims:
            reduced_sparse_dims.append(d)
        else:
            reduced_dense_dims.append(d + 1 - num_sparse_dims)

    # Reduce dense dimensions
    if len(reduced_dense_dims) > 0:
        if reduce == "sum":
            new_values = values
            new_values = op(new_values, dim=reduced_dense_dims, keepdim=bool(keepdim))
        else:
            # FIXME: Implement reductions for dense dimensions for ops with non-zero reduction identities
            return NotImplemented
    else:
        new_values = values.clone()

    # Reduce sparse dimensions
    if len(reduced_sparse_dims) == num_sparse_dims:
        if reduce in {"amax", "amin"} and new_values.size(0) == 0:
            # IndexError: amax(): Expected reduction dim 0 to have non-zero size.
            # sum()/prod() return the reduction identity when dim has size 0 but amax()/amin() do not
            # See https://github.com/pytorch/pytorch/issues/61901
            new_values = _reduction_identity(reduce, new_values)
        else:
            new_values = op(new_values, dim=0)
        if keepdim:
            for _ in range(num_sparse_dims):
                new_values = new_values.unsqueeze(0)
        return new_values.to(dtype=output_dtype).to_sparse()
    else:
        new_indices = indices.clone()
        if keepdim:
            # zero out reduced sparse dimensions if keepdim = True
            # ensures that the call to torch.unique folds duplicated indices together while preserving the dimension
            new_indices[reduced_sparse_dims, :] = 0
        else:
            # remove reduced sparse dimensions if keepdim = False
            if len(reduced_sparse_dims) > 0:
                retained_sparse_dims = [
                    i
                    for i in range(num_sparse_dims)
                    if i not in set(reduced_sparse_dims)
                ]
                new_indices = new_indices.index_select(
                    0, torch.tensor(retained_sparse_dims).to(mask_input.device)
                )

        # Use scatter_reduce to reduce items in the new_values tensor that correspond to the same indices in new_indices
        if new_indices.numel() > 0:
            # lexsort indices and get index tensor for scatter reduction
            new_indices, inverse_indices = torch.unique(
                new_indices, return_inverse=True, dim=1
            )
            out_shape = list(new_values.shape)
            out_shape[0] = new_indices.shape[1]
            for _ in range(new_values.ndim - 1):
                inverse_indices = inverse_indices.unsqueeze(-1)
            scatter_indices = inverse_indices.expand(new_values.shape)
            # FIXME: temporary workaround for issue with bfloat16/float16 remove when acctype is implemented for scatter_reduce
            if output_dtype in {torch.bfloat16, torch.float16}:
                new_values = new_values.to(torch.float)
                out = new_values.new_empty(out_shape)
                new_values = out.scatter_reduce_(
                    0, scatter_indices, new_values, reduce=reduce, include_self=False
                )
                new_values = new_values.to(dtype=output_dtype)
            else:
                out = new_values.new_empty(out_shape)
                new_values = out.scatter_reduce_(
                    0, scatter_indices, new_values, reduce=reduce, include_self=False
                )

        return torch.sparse_coo_tensor(
            new_indices,
            new_values,
            output_shape,
            dtype=output_dtype,
            device=mask_input.device,
        )


def _sparse_csr_segment_reduction_helper(
    op,
    mask_input: Tensor,
    dims: Tuple[int, ...],
    keepdim: bool,
    dtype: Optional[DType] = None,
) -> Tensor:
    # Currently, while sparse CSR tensors are always 2D with no dense
    # dimensions, keepdim must be True.
    # FIXME: update this when dense dimensions are implemented for CSR tensors
    assert (
        keepdim
    ), "reduction operations on CSR tensors with keepdim=False are unsupported"
    reduce = op.__name__
    valid_reductions = ["sum", "prod", "mean", "amax", "amin"]
    if reduce not in valid_reductions:
        raise ValueError(
            f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead"
        )

    device = mask_input.device
    output_dtype = dtype
    values, crow_indices, col_indices = (
        mask_input.values(),
        mask_input.crow_indices(),
        mask_input.col_indices(),
    )

    # promote dtype if specified
    if values.dtype != output_dtype:
        values = values.to(output_dtype)

    if len(dims) == 0:
        return mask_input
    if len(dims) == 1:
        if dims[0] == 0:
            new_col_indices, scatter_indices = torch.unique(
                col_indices, return_inverse=True
            )
            new_nnz = new_col_indices.shape[0]
            new_crow_indices = torch.tensor([0, new_nnz])
            new_values = values.new_empty(new_col_indices.shape)
            new_values.scatter_reduce_(
                0, scatter_indices, values, reduce, include_self=False
            )
            new_shape = [1, mask_input.size(1)]
        else:
            assert (
                dims[0] == 1
            ), "Sparse CSR tensors are 2D and only support reduction along dim 0 or 1."
            # all intervals new_crow_indices[i] - new_crow_indices[i-1] are 1
            # except for where crow_indices[i] == crow_indices[i-1] where the interval remains as 0
            new_crow_indices = torch.cat(
                (
                    crow_indices.new_zeros(1),
                    torch.cumsum(torch.diff(crow_indices) != 0, 0),
                ),
                0,
            )
            new_nnz = new_crow_indices[-1]
            new_col_indices = col_indices.new_zeros(new_nnz)
            new_values = torch._segment_reduce(values, reduce, offsets=crow_indices)  # type: ignore[attr-defined]
            new_shape = [mask_input.size(0), 1]
    else:
        assert len(dims) == 2
        nnz = min(1, values.numel())
        if nnz == 1:
            op_kwargs = {"keepdim": True, "dtype": output_dtype}
            # amax and amin do not support dtype kwarg
            if reduce in ["amax", "amin"]:
                del op_kwargs["dtype"]
            new_values = op(values, 0, **op_kwargs)
        else:
            new_values = torch.empty(0, dtype=output_dtype)
        new_col_indices = col_indices.new_zeros(nnz)
        new_crow_indices = torch.tensor([0, nnz])
        new_shape = [1, nnz]

    return torch.sparse_csr_tensor(
        new_crow_indices,
        new_col_indices,
        new_values,
        new_shape,
        dtype=output_dtype,
        device=device,
    )


def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    """Sparse variant of torch.where. Supports sparse CSR tensors."""
    # TODO: implement sparse CSR specific where operator for efficiency
    return _sparse_coo_where(
        mask.to_sparse_coo(), input.to_sparse_coo(), fill_value
    ).to_sparse_csr()


def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    """torch.where with sparse inputs support.

    _where implements the following invariant:

      _where(mask, input, fill_value).to_dense(fill_value) ==
        torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value))

    where `a == b` means `assertEqual(a, b)`, mask is a boolean sparse
    tensor, and `to_dense(fill_value)` is like `to_dense()` except
    that the unspecified elements are mapped to `fill_value` rather
    than to `0`.

    Returns a sparse tensor with the following features:

    - all specified elements correspond to masked-in elements that
      have the values of the input tensor. If there exists a masked-in
      element (as specified by mask) that is not specified in the
      input, then the corresponding element in the result tensor has
      value 0. In the dense part of the sparse tensor, the masked-out
      elements are replaced with fill_value.
    - all unspecified elements correspond to masked-out elements.
    """
    if mask.layout == torch.strided:
        return torch.where(mask, input, fill_value)
    elif mask.layout == torch.sparse_coo:
        return _sparse_coo_where(mask, input, fill_value)
    elif mask.layout == torch.sparse_csr:
        return _sparse_csr_where(mask, input, fill_value)
    else:
        raise ValueError(
            f"_where expects strided or sparse COO or sparse CSR tensor but got {mask.layout}"
        )
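

# A minimal sketch of _where for the strided case (illustrative comment
# only; sparse layouts dispatch to the helpers above):
#
#   >>> _where(torch.tensor([True, False]), torch.tensor([1.0, 2.0]),
#   ...        torch.tensor(float("-inf")))
#   tensor([1., -inf])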


def _input_mask(input: Union[Tensor, MaskedTensor], *args, **kwargs) -> Tensor:
    """Return canonical input mask.

    A canonical input mask is defined as a boolean mask tensor whose
    shape and layout match the shape and the layout of the input.

    The canonical input mask is computed from the :attr:`mask` tensor
    content to meet the following criteria:

    1. The shape of the canonical input mask is the same as the shape
       of :attr:`input` tensor. If the mask tensor has a smaller shape
       than the shape of the :attr:`input`, broadcasting rules will be
       applied. Downcasting of mask is not supported.

    2. The layout of the canonical input mask is the same as the
       layout of the :attr:`input` tensor. If the mask has a different
       layout, it will be converted to the expected layout. In the
       case of sparse COO layout, the canonical input mask will be
       coalesced.

    3. The dtype of the canonical input mask is torch.bool. If the
       mask dtype is not bool then it will be converted to bool dtype
       using the `.to(dtype=bool)` method call.

    4. The elements of the canonical input mask have boolean values
       copied from the content of the :attr:`mask` tensor (after
       possible broadcasting and dtype conversion transforms). In
       general, the sparsity pattern of the sparse canonical input
       mask need not be the same as the sparsity pattern of the
       sparse :attr:`input` tensor.
    """
    if input.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}:
        raise ValueError(
            f"_input_mask expects strided or sparse COO or sparse CSR tensor but got {input.layout}"
        )

    mask = kwargs.get("mask")

    # default mask
    if mask is None:
        raise ValueError("_input_mask requires explicit mask")

    # mask shape must match with input shape
    if mask.shape != input.shape:
        if mask.ndim > input.ndim:
            raise IndexError(
                "_input_mask expected broadcastable mask (got mask dimensionality higher than of the input)"
            )
        if mask.layout == torch.strided:
            mask = torch.broadcast_to(mask.clone(), input.shape).to(dtype=torch.bool)
        elif mask.layout == torch.sparse_coo:
            mask = torch._sparse_broadcast_to(mask, input.shape)
        else:
            assert mask.layout == torch.sparse_csr
            # Broadcasting of CSR tensors is not implemented. Working
            # around by using COO layout.
            mask = torch._sparse_broadcast_to(
                mask.to_sparse(), input.shape
            ).to_sparse_csr()

    # mask layout must match with input layout
    if mask.layout != input.layout:
        if input.layout == torch.strided:
            mask = mask.to_dense()
        elif input.layout == torch.sparse_coo:
            if mask.layout == torch.strided:
                mask = mask.to_sparse(input.sparse_dim())
            else:
                mask = mask.to_sparse()
        else:
            assert input.layout == torch.sparse_csr
            mask = mask.to_sparse_csr()

    # sparse mask must be coalesced
    if mask.layout == torch.sparse_coo:
        mask = mask.coalesce()

    # mask is a boolean tensor
    mask = mask.to(dtype=torch.bool)

    return mask
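

# A minimal sketch of mask canonicalization for a strided input
# (illustrative comment only): a smaller integer mask is broadcast to the
# input shape and cast to bool:
#
#   >>> _input_mask(torch.zeros(2, 3), mask=torch.tensor([1, 0, 1]))
#   tensor([[ True, False,  True],
#           [ True, False,  True]])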


def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor:
    """Return output mask of masked operation applied to given arguments."""
    if callable(op):
        is_reduction = op.__name__ in {
            "sum",
            "prod",
            "amax",
            "amin",
            "argmax",
            "argmin",
            "mean",
            "median",
            "norm",
            "var",
            "std",
            "logsumexp",
        }
        is_normalization = op.__name__ in {
            "softmax",
            "log_softmax",
            "softmin",
            "normalize",
            "cumsum",
            "cumprod",
        }
        if is_reduction:
            if op.__name__ == "norm":
                if args:
                    args = args[1:]  # lstrip ord argument
            dim = args[0] if args else kwargs.get("dim")
            outmask = _input_mask(input, *args, **kwargs)
            keepdim = kwargs.get("keepdim", False)
            dim_ = _canonical_dim(dim, input.ndim)
            return _any(outmask, dim_, bool(keepdim))
        elif is_normalization:
            return _input_mask(input, *args, **kwargs)
        else:
            raise ValueError(
                f"_output_mask expected masked operation (got callable {op.__module__}.{op.__name__})"
            )
    else:
        raise ValueError(
            f"_output_mask expected masked operation (got {type(op).__name__} object)"
        )
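

# Illustrative comment: for a reduction, the output mask marks the positions
# where at least one element along the reduced dims was masked in (here,
# `sum` refers to the masked sum defined later in this module):
#
#   >>> inp = torch.zeros(2, 3)
#   >>> m = torch.tensor([[True, False, True], [False, False, False]])
#   >>> _output_mask(sum, inp, 1, mask=m)
#   tensor([ True, False])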


def _combine_input_and_mask(
    op, input: Union[MaskedTensor, Tensor], mask, *args
) -> Tensor:
    def helper(input, mask):
        if mask is None:
            return input
        canonical_mask = _input_mask(input, mask=mask)
        if callable(op):
            fill_value = _reduction_identity(op.__name__, input, *args)
            return _where(canonical_mask, input, fill_value)
        else:
            raise ValueError(
                f"_combine_input_and_mask expected masked operation (got {type(op).__name__} object)"
            )

    class Combine(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, mask):
            """Return input with masked-out elements eliminated for the given operations."""
            ctx.save_for_backward(mask)
            if mask is not None:
                ctx.mark_non_differentiable(mask)
            return helper(input, mask)

        @staticmethod
        def backward(ctx, grad_output):
            (mask,) = ctx.saved_tensors
            grad_data = (
                grad_output.get_data() if is_masked_tensor(grad_output) else grad_output
            )
            result = as_masked_tensor(grad_data, mask)
            return result, None

    return (
        Combine.apply(input.get_data(), input.get_mask())  # type: ignore[union-attr]
        if is_masked_tensor(input)
        else helper(input, mask)
    )
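

# A minimal sketch of _combine_input_and_mask (illustrative comment only):
# masked-out elements are replaced by the reduction identity of the given
# op, so the regular reduction applied downstream ignores them (`sum` and
# `amax` are the masked ops defined later in this module):
#
#   >>> inp = torch.tensor([1.0, 2.0, 3.0])
#   >>> m = torch.tensor([True, False, True])
#   >>> _combine_input_and_mask(sum, inp, m)
#   tensor([1., 0., 3.])
#   >>> _combine_input_and_mask(amax, inp, m)
#   tensor([1., -inf, 3.])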


@_apply_docstring_templates
def sum(
    input: Union[Tensor, MaskedTensor],
    dim: DimOrDims = None,
    *,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    # __doc__ is generated by _apply_docstring_templates decorator
    if dtype is None:
        # promote integer types to int64 when output dtype is not specified
        if input.layout == torch.sparse_csr:
            if input.dtype in {
                torch.uint8,
                torch.bool,
                torch.int8,
                torch.int16,
                torch.int32,
            }:
                # csr.to(dtype=torch.int64) is not implemented, so
                # using coo.to on input to ensure the promoted dtype
                input = input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr()
            else:
                dtype = input.dtype
        else:
            dtype = input.dtype
            if input.dtype in {
                torch.uint8,
                torch.bool,
                torch.int8,
                torch.int16,
                torch.int32,
            }:
                dtype = torch.int64
    dim_ = _canonical_dim(dim, input.ndim)
    mask_input = _combine_input_and_mask(sum, input, mask)
    if mask_input.layout == torch.strided:
        return torch.sum(mask_input, dim_, bool(keepdim), dtype=dtype)
    elif mask_input.layout == torch.sparse_coo:
        return _sparse_coo_scatter_reduction_helper(
            torch.sum, mask_input, dim_, bool(keepdim), dtype
        )
    elif mask_input.layout == torch.sparse_csr:
        return torch._sparse_csr_sum(
            mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype
        )
    else:
        raise ValueError(
            f"masked sum expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)"
        )
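

# A minimal usage sketch of masked sum (illustrative comment only): only
# the masked-in elements 1.0 and 3.0 contribute to the reduction:
#
#   >>> sum(torch.tensor([[1.0, 2.0, 3.0]]), 1,
#   ...     mask=torch.tensor([[True, False, True]]))
#   tensor([4.])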


@_apply_docstring_templates
def prod(
    input: Union[Tensor, MaskedTensor],
    dim: DimOrDims = None,
    *,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    # __doc__ is generated by _apply_docstring_templates decorator
    if dtype is None:
        # promote integer types to int64 when output dtype is not specified
        if input.layout == torch.sparse_csr:
            if input.dtype in {
                torch.uint8,
                torch.bool,
                torch.int8,
                torch.int16,
                torch.int32,
            }:
                # csr.to(dtype=torch.int64) is not implemented, so
                # using coo.to on input to ensure the promoted dtype
                input = input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr()
            else:
                dtype = input.dtype
        else:
            dtype = input.dtype
            if input.dtype in {
                torch.uint8,
                torch.bool,
                torch.int8,
                torch.int16,
                torch.int32,
            }:
                dtype = torch.int64
    dim_ = _canonical_dim(dim, input.ndim)
    mask_input = _combine_input_and_mask(prod, input, mask)
    if mask_input.layout == torch.strided:
        # Workaround https://github.com/pytorch/pytorch/issues/56586
        result = mask_input
        result = result.to(dtype=dtype)
        for d in reversed(dim_):
            result = result.prod(dim=d, keepdim=bool(keepdim))
        return result
    elif mask_input.layout == torch.sparse_coo:
        if mask is None:
            # See comment in the sparse_csr branch, the same issue arises for sparse_coo tensors
            raise ValueError(
                "masked prod expects explicit mask for sparse_coo tensor input"
            )
        return _sparse_coo_scatter_reduction_helper(
            torch.prod, mask_input, dim_, bool(keepdim), dtype
        )
    elif mask_input.layout == torch.sparse_csr:
        if mask is None:
            # mask is None corresponds to all-True mask. The
            # unspecified elements in the CSR tensor correspond to
            # zero values. Hence, the prod reduction result is
            # automatically zero unless all elements are specified.
            # A semi-optimal way to take this into account is to use:
            #
            #   masked_prod(csr, ..., mask=None) == torch._sparse_csr_prod(csr, ...) * all(csr.nonzero(), ...)
            #
            # but that requires implementing `all` and `nonzero`
            # support for sparse csr tensors.
            raise ValueError(
                "masked prod expects explicit mask for sparse_csr tensor input"
            )
        return torch._sparse_csr_prod(
            mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype
        )
    else:
        raise ValueError(
            f"masked prod expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)"
        )


@_apply_docstring_templates
def cumsum(
    input: Tensor,
    dim: int,
    *,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    if dtype is None:
        dtype = input.dtype
    dim_ = _canonical_dim(dim, input.ndim)[0]
    mask_input = _combine_input_and_mask(sum, input, mask)
    if mask_input.layout == torch.strided:
        return torch.cumsum(mask_input, dim_, dtype=dtype).to(dtype=dtype)
    else:
        raise ValueError(
            f"masked cumsum expects strided tensor (got {mask_input.layout} tensor)"
        )
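

# Illustrative comment: masked-out elements contribute the sum identity
# (zero), so they pass the running total through unchanged:
#
#   >>> cumsum(torch.tensor([1.0, 2.0, 3.0]), 0,
#   ...        mask=torch.tensor([True, False, True]))
#   tensor([1., 1., 4.])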


@_apply_docstring_templates
def cumprod(
    input: Tensor,
    dim: int,
    *,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    if dtype is None:
        dtype = input.dtype
    dim_ = _canonical_dim(dim, input.ndim)[0]
    mask_input = _combine_input_and_mask(prod, input, mask)
    if mask_input.layout == torch.strided:
        return torch.cumprod(mask_input, dim_, dtype=dtype).to(dtype=dtype)
    else:
        raise ValueError(
            f"masked cumprod expects strided tensor (got {mask_input.layout} tensor)"
        )


@_apply_docstring_templates
def amax(
    input: Union[Tensor, MaskedTensor],
    dim: DimOrDims = None,
    *,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    mask_input = _combine_input_and_mask(amax, input, mask)
    dim_ = _canonical_dim(dim, mask_input.ndim)
    if mask_input.layout == torch.strided:
        return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
    elif mask_input.layout == torch.sparse_coo:
        if mask is None:
            # See comment in the sparse_csr branch of prod, a similar issue arises here
            # where unspecified elements along a dimension may need to be reduced with the result
            raise ValueError(
                "masked amax expects explicit mask for sparse_coo tensor input"
            )
        return _sparse_coo_scatter_reduction_helper(
            torch.amax, mask_input, dim_, bool(keepdim), dtype
        )
    elif mask_input.layout == torch.sparse_csr:
        if mask is None:
            raise ValueError(
                "masked amax expects explicit mask for sparse_csr tensor input"
            )
        return _sparse_csr_segment_reduction_helper(
            torch.amax, mask_input, dim_, bool(keepdim), dtype
        )
    else:
        raise ValueError(
            f"masked amax expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)"
        )


@_apply_docstring_templates
def amin(
    input: Union[Tensor, MaskedTensor],
    dim: DimOrDims = None,
    *,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    mask_input = _combine_input_and_mask(amin, input, mask)
    dim_ = _canonical_dim(dim, mask_input.ndim)
    if mask_input.layout == torch.strided:
        return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
    elif mask_input.layout == torch.sparse_coo:
        if mask is None:
            # See comment in the sparse_csr branch of prod, a similar issue arises here
            # where unspecified elements along a dimension may need to be reduced with the result
            raise ValueError(
                "masked amin expects explicit mask for sparse_coo tensor input"
            )
        return _sparse_coo_scatter_reduction_helper(
            torch.amin, mask_input, dim_, bool(keepdim), dtype
        )
    elif mask_input.layout == torch.sparse_csr:
        if mask is None:
            raise ValueError(
                "masked amin expects explicit mask for sparse_csr tensor input"
            )
        return _sparse_csr_segment_reduction_helper(
            torch.amin, mask_input, dim_, bool(keepdim), dtype
        )
    else:
        raise ValueError(
            f"masked amin expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)"
        )


@_apply_docstring_templates
def argmax(
    input: Union[Tensor, MaskedTensor],
    dim: Optional[int] = None,
    *,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    mask_input = _combine_input_and_mask(argmax, input, mask)
    if mask_input.layout == torch.strided:
        return torch.argmax(mask_input, dim, bool(keepdim)).to(dtype=dtype)
    else:
        raise ValueError(
            f"masked argmax expects strided tensor (got {mask_input.layout} tensor)"
        )


@_apply_docstring_templates
def argmin(
    input: Union[Tensor, MaskedTensor],
    dim: Optional[int] = None,
    *,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    mask_input = _combine_input_and_mask(argmin, input, mask)
    if mask_input.layout == torch.strided:
        return torch.argmin(mask_input, dim, bool(keepdim)).to(dtype=dtype)
    else:
        raise ValueError(
            f"masked argmin expects strided tensor (got {mask_input.layout} tensor)"
        )


@_apply_docstring_templates
def mean(
    input: Union[Tensor, MaskedTensor],
    dim: DimOrDims = None,
    *,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

By definition, the identity value of a mean operation is the mean
value of the tensor. If all elements of the input tensor along the
given dimension(s) :attr:`dim` are masked-out, the identity value of
the mean is undefined. Due to this ambiguity, the elements of the
output tensor with strided layout that correspond to fully masked-out
elements have ``nan`` values.

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    if input.layout == torch.strided:
        if mask is None:
            # TODO: compute count analytically
            count = sum(
                torch.ones(input.shape, dtype=torch.int64, device=input.device),
                dim,
                keepdim=keepdim,
            )
            total = sum(input, dim, keepdim=keepdim, dtype=dtype)
        else:
            inmask = _input_mask(input, mask=mask)
            count = sum(
                inmask.new_ones(input.shape, dtype=torch.int64),
                dim,
                keepdim=keepdim,
                mask=inmask,
            )
            total = sum(input, dim, keepdim=keepdim, dtype=dtype, mask=inmask)
        return total / count
    elif input.layout == torch.sparse_csr:
        mask_input = _combine_input_and_mask(mean, input, mask)
        dim_ = _canonical_dim(dim, mask_input.ndim)
        if mask is None:
            raise ValueError(
                "masked mean expects explicit mask for sparse_csr tensor input"
            )
        return _sparse_csr_segment_reduction_helper(
            torch.mean, mask_input, dim_, bool(keepdim), dtype
        )
    else:
        raise ValueError(
            f"masked mean expects strided or sparse_csr tensor (got {input.layout} tensor)"
        )
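
# Illustrative usage sketch (not part of the original module). The mean is
# computed as the masked sum divided by the masked count, so a fully
# masked-out row divides 0 by 0 and yields ``nan``, as the docstring above
# describes:
#
#   >>> x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
#   >>> m = torch.tensor([[True, True], [False, False]])
#   >>> mean(x, 1, mask=m)
#   tensor([1.5000, nan])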


@_apply_docstring_templates
def median(
    input: Union[Tensor, MaskedTensor],
    dim: int = -1,
    *,
    keepdim: bool = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

By definition, the identity value of a median operation is the median
value of the tensor. If all elements of the input tensor along the
given dimension(s) :attr:`dim` are masked-out, the identity value of
the median is undefined. Due to this ambiguity, the elements of the
output tensor with strided layout that correspond to fully masked-out
elements have ``nan`` values.

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    dim_ = _canonical_dim(dim, input.ndim)[0]
    is_float = torch.is_floating_point(input)
    if not is_float:
        input = input.to(dtype=torch.float)
    mask_input = _combine_input_and_mask(median, input, mask)
    if mask_input.layout == torch.strided:
        output = torch.nanmedian(mask_input, dim_, keepdim).values
        if is_float:
            return output
        elif not is_float and not torch.isnan(output).any():
            return output.to(dtype=dtype)
        else:
            raise ValueError(
                "masked median expects no fully masked-out rows if dtype is not floating point"
            )
    else:
        raise ValueError(
            f"masked median expects strided tensor (got {mask_input.layout} tensor)"
        )
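
# Illustrative usage sketch (not part of the original module). Masked-out
# elements are replaced with ``nan`` and ``torch.nanmedian`` ignores them:
#
#   >>> x = torch.tensor([2.0, 100.0, 1.0, 5.0])
#   >>> m = torch.tensor([True, False, True, True])
#   >>> median(x, 0, mask=m)
#   tensor(2.)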


@_apply_docstring_templates
def logsumexp(
    input: Tensor,
    dim: DimOrDims = None,
    *,
    keepdim: bool = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    if dtype is None:
        dtype = input.dtype
    dim_ = _canonical_dim(dim, input.ndim)
    mask_input = _combine_input_and_mask(logsumexp, input, mask)
    if mask_input.layout == torch.strided:
        return torch.logsumexp(mask_input, dim_, keepdim=keepdim).to(dtype=dtype)
    else:
        raise ValueError(
            f"masked logsumexp expects strided tensor (got {mask_input.layout} tensor)"
        )
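
# Illustrative usage sketch (not part of the original module). Masked-out
# elements take the identity ``-inf`` and contribute ``exp(-inf) == 0`` to
# the sum, i.e. log(exp(1) + exp(2)) below:
#
#   >>> x = torch.tensor([1.0, 2.0, 3.0])
#   >>> m = torch.tensor([True, True, False])
#   >>> logsumexp(x, 0, mask=m)
#   tensor(2.3133)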


# TODO: Add docstring; the docstring templates currently only cover
# reductions and normalizations.
# @_apply_docstring_templates
def logaddexp(
    input: Union[Tensor, MaskedTensor],
    other: Union[Tensor, MaskedTensor],
    *,
    dtype: Optional[DType] = None,
    input_mask: Optional[Tensor] = None,
    other_mask: Optional[Tensor] = None,
) -> Tensor:
    if dtype is None:
        dtype = input.dtype
    if input.layout == torch.strided and other.layout == torch.strided:
        mask_input = _combine_input_and_mask(logsumexp, input, input_mask)
        mask_other = _combine_input_and_mask(logsumexp, other, other_mask)
        return torch.logaddexp(mask_input, mask_other).to(dtype=dtype)
    else:
        raise ValueError(
            f"masked logaddexp expects strided tensors (got {input.layout} tensor for input, {other.layout} for other)"
        )
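
# Illustrative usage sketch (not part of the original module). Each operand
# is masked independently; a masked-out slot acts as ``-inf``, which is the
# identity of ``logaddexp``:
#
#   >>> a = torch.tensor([0.0, 0.0])
#   >>> b = torch.tensor([0.0, 0.0])
#   >>> logaddexp(a, b, input_mask=torch.tensor([True, False]))
#   tensor([0.6931, 0.0000])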


@_apply_docstring_templates
def norm(
    input: Union[Tensor, MaskedTensor],
    ord: Optional[float] = 2.0,
    dim: DimOrDims = None,
    *,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

The identity value of the norm operation, which is used to start the
reduction, is ``{identity_float32}``, except for ``ord=-inf``, for
which it is ``{identity_ord_ninf}``.

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    mask_input = _combine_input_and_mask(norm, input, mask, ord)
    if mask_input.layout == torch.strided:
        dim_ = _canonical_dim(dim, input.ndim)
        return torch.linalg.vector_norm(
            mask_input, ord, dim_, bool(keepdim), dtype=dtype
        )
    else:
        raise ValueError(
            f"masked norm expects strided tensor (got {mask_input.layout} tensor)"
        )
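
# Illustrative usage sketch (not part of the original module). Masked-out
# elements take the identity value (0 for ``ord=2``) and drop out of the
# norm:
#
#   >>> x = torch.tensor([3.0, 4.0, 100.0])
#   >>> m = torch.tensor([True, True, False])
#   >>> norm(x, 2.0, 0, mask=m)
#   tensor(5.)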


def _std_var(
    input: Union[Tensor, MaskedTensor],
    dim: DimOrDims,
    unbiased: Optional[bool],
    *,
    correction: Optional[int],
    keepdim: Optional[bool],
    dtype: Optional[DType],
    mask: Optional[Tensor],
    take_sqrt: Optional[bool],
) -> Tensor:
    assert (
        unbiased is None or correction is None
    ), "Only one of unbiased and correction may be given"
    correction_int = 1
    if unbiased is not None:
        correction_int = 1 if unbiased else 0
    if correction is not None:
        correction_int = correction

    if dtype is None:
        dtype = input.dtype
        if not (dtype.is_floating_point or dtype.is_complex):
            dtype = torch.float32
    compute_dtype = dtype
    if not (compute_dtype.is_floating_point or compute_dtype.is_complex):
        compute_dtype = torch.float32
    if input.layout == torch.strided:
        if mask is None:
            # TODO: compute count analytically
            count = sum(
                torch.ones(input.shape, dtype=torch.int64, device=input.device),
                dim,
                keepdim=True,
            )
            sample_total = sum(input, dim, keepdim=True, dtype=dtype)
        else:
            inmask = _input_mask(input, mask=mask)
            count = sum(
                inmask.new_ones(input.shape, dtype=torch.int64),
                dim,
                keepdim=True,
                mask=inmask,
            )
            sample_total = sum(input, dim, keepdim=True, dtype=dtype, mask=inmask)
        # TODO: replace torch.subtract/divide/square/maximum with masked
        # subtract/divide/square/maximum when these become available.
        sample_mean = torch.divide(sample_total, count)
        x = torch.subtract(input, sample_mean)
        if mask is None:
            total = sum(x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype)
        else:
            total = sum(
                x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype, mask=inmask
            )
        if not keepdim:
            count = count.reshape(total.shape)
        if correction_int != 0:
            count = torch.subtract(count, correction_int)
            count = torch.maximum(count, count.new_zeros([]))
        output = torch.divide(total, count).to(dtype=dtype)
        if take_sqrt:
            output = torch.sqrt(output)
        return output
    else:
        raise ValueError(
            f"masked std/var expects strided tensor (got {input.layout} tensor)"
        )
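
# Note on the correction handling above (illustrative): ``unbiased=True``
# resolves to ``correction_int == 1`` (Bessel's correction) and
# ``unbiased=False`` to ``correction_int == 0``, so these two helper calls
# are equivalent:
#
#   >>> x = torch.tensor([1.0, 2.0, 3.0])
#   >>> torch.equal(
#   ...     _std_var(input=x, dim=0, unbiased=False, correction=None,
#   ...              keepdim=False, dtype=None, mask=None, take_sqrt=False),
#   ...     _std_var(input=x, dim=0, unbiased=None, correction=0,
#   ...              keepdim=False, dtype=None, mask=None, take_sqrt=False),
#   ... )
#   True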


@_apply_docstring_templates
def var(
    input: Union[Tensor, MaskedTensor],
    dim: DimOrDims = None,
    unbiased: Optional[bool] = None,
    *,
    correction: Optional[int] = None,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

The identity value of the sample variance operation is undefined. The
elements of the output tensor with strided layout that correspond to
fully masked-out elements have ``nan`` values.

{reduction_args}

{reduction_example}"""
    return _std_var(
        input=input,
        dim=dim,
        unbiased=unbiased,
        correction=correction,
        keepdim=keepdim,
        dtype=dtype,
        mask=mask,
        take_sqrt=False,
    )
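
# Illustrative usage sketch (not part of the original module). With the
# default (unbiased) estimator the masked count is reduced by one, so the
# three unmasked values below give sum-of-squares 2 divided by 2:
#
#   >>> x = torch.tensor([1.0, 2.0, 3.0, 4.0])
#   >>> m = torch.tensor([True, True, True, False])
#   >>> var(x, 0, mask=m)
#   tensor(1.)
#   >>> var(x, 0, correction=0, mask=m)
#   tensor(0.6667)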


@_apply_docstring_templates
def std(
    input: Union[Tensor, MaskedTensor],
    dim: DimOrDims = None,
    unbiased: Optional[bool] = None,
    *,
    correction: Optional[int] = None,
    keepdim: Optional[bool] = False,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

The identity value of the sample standard deviation operation is
undefined. The elements of the output tensor with strided layout that
correspond to fully masked-out elements have ``nan`` values.

{reduction_args}

{reduction_example}"""
    return _std_var(
        input=input,
        dim=dim,
        unbiased=unbiased,
        correction=correction,
        keepdim=keepdim,
        dtype=dtype,
        mask=mask,
        take_sqrt=True,
    )
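
# Illustrative sketch (not part of the original module): ``std`` is the
# square root of ``var`` computed with the same arguments:
#
#   >>> std(torch.tensor([1.0, 3.0]), 0)
#   tensor(1.4142)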


@_apply_docstring_templates
def softmax(
    input: Union[Tensor, MaskedTensor],
    dim: int,
    *,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    if dtype is None:
        dtype = input.dtype
    dim_ = _canonical_dim(dim, input.ndim)[0]
    mask_input = _combine_input_and_mask(amax, input, mask)
    if mask_input.layout == torch.strided:
        return torch.nn.functional.softmax(mask_input, dim_, dtype=dtype)
    else:
        raise ValueError(
            f"masked softmax expects strided tensor (got {mask_input.layout} tensor)"
        )
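
# Illustrative usage sketch (not part of the original module). Masked-out
# logits become ``-inf`` (the identity of ``amax``), so their probability is
# exactly zero and the remaining entries renormalize:
#
#   >>> x = torch.tensor([1.0, 2.0, 3.0])
#   >>> m = torch.tensor([True, True, False])
#   >>> softmax(x, 0, mask=m)
#   tensor([0.2689, 0.7311, 0.0000])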


@_apply_docstring_templates
def log_softmax(
    input: Union[Tensor, MaskedTensor],
    dim: int,
    *,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    if dtype is None:
        dtype = input.dtype
    dim_ = _canonical_dim(dim, input.ndim)[0]
    mask_input = _combine_input_and_mask(amax, input, mask)
    if mask_input.layout == torch.strided:
        return torch.nn.functional.log_softmax(mask_input, dim_, dtype=dtype)
    else:
        raise ValueError(
            f"masked log_softmax expects strided tensor (got {mask_input.layout} tensor)"
        )
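
# Analogous sketch for ``log_softmax`` (illustrative): masked-out positions
# come out as ``-inf``:
#
#   >>> log_softmax(torch.tensor([1.0, 2.0, 3.0]), 0,
#   ...             mask=torch.tensor([True, True, False]))
#   tensor([-1.3133, -0.3133, -inf])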


@_apply_docstring_templates
def softmin(
    input: Union[Tensor, MaskedTensor],
    dim: int,
    *,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    if dtype is None:
        dtype = input.dtype
    dim_ = _canonical_dim(dim, input.ndim)[0]
    mask_input = _combine_input_and_mask(amin, input, mask)
    if mask_input.layout == torch.strided:
        return torch.nn.functional.softmin(mask_input, dim_, dtype=dtype)
    else:
        raise ValueError(
            f"masked softmin expects strided tensor (got {mask_input.layout} tensor)"
        )
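
# Analogous sketch for ``softmin`` (illustrative): masked-out elements take
# ``+inf`` (the identity of ``amin``) and get zero weight:
#
#   >>> softmin(torch.tensor([1.0, 2.0, 3.0]), 0,
#   ...         mask=torch.tensor([True, True, False]))
#   tensor([0.7311, 0.2689, 0.0000])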


@_apply_docstring_templates
def normalize(
    input: Union[Tensor, MaskedTensor],
    ord: float,
    dim: int,
    *,
    eps: float = 1e-12,
    dtype: Optional[DType] = None,
    mask: Optional[Tensor] = None,
) -> Tensor:
    if dtype is None:
        dtype = input.dtype
    dim_ = _canonical_dim(dim, input.ndim)[0]
    # TODO: eliminate mask_input as unnecessary when using masked divide.
    mask_input = _combine_input_and_mask(sum, input, mask)
    if mask_input.layout == torch.strided:
        nrm_ = norm(input, ord, dim, keepdim=True, dtype=dtype, mask=mask)
        # TODO: replace torch.maximum with masked maximum when available.
        denom = torch.maximum(nrm_, nrm_.new_full([], eps))
        # TODO: replace torch.divide with masked divide when available.
        return torch.divide(mask_input, denom)
    else:
        raise ValueError(
            f"masked normalize expects strided tensor (got {mask_input.layout} tensor)"
        )
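
# Illustrative usage sketch (not part of the original module). The input is
# divided by its masked norm (clamped below by ``eps``); masked-out elements
# are zeroed via the ``sum`` identity before the division:
#
#   >>> x = torch.tensor([3.0, 4.0, 100.0])
#   >>> m = torch.tensor([True, True, False])
#   >>> normalize(x, 2.0, 0, mask=m)
#   tensor([0.6000, 0.8000, 0.0000])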