  1. """
  2. Python implementation of ``__torch_function__``
  3. While most of the torch API and handling for ``__torch_function__`` happens
  4. at the C++ level, some of the torch API is written in Python so we need
  5. python-level handling for ``__torch_function__`` overrides as well. The main
  6. developer-facing functionality in this file are handle_torch_function and
  7. has_torch_function. See torch/functional.py and test/test_overrides.py
  8. for usage examples.
  9. Note
  10. ----
  11. heavily inspired by NumPy's ``__array_function__`` (see:
  12. https://github.com/pytorch/pytorch/issues/24015 and
  13. https://www.numpy.org/neps/nep-0018-array-function-protocol.html
  14. )
  15. If changing this file in a way that can affect ``__torch_function__`` overhead,
  16. please report the benchmarks in ``benchmarks/overrides_benchmark``. See the
  17. instructions in the ``README.md`` in that directory.
  18. """
import __future__

import collections
import functools
import types
import warnings
from typing import Dict, Set, List, Any, Callable, Iterable, Type, Tuple
import contextlib

import torch
from torch._C import (
    _has_torch_function, _has_torch_function_unary,
    _has_torch_function_variadic, _add_docstr,
    _push_on_torch_function_stack, _pop_torch_function_stack,
    _get_function_stack_at, _len_torch_function_stack,
    _is_torch_function_mode_enabled)

__all__ = [
    "get_ignored_functions",
    "get_overridable_functions",
    "get_testing_overrides",
    "handle_torch_function",
    "has_torch_function",
    "resolve_name",
    "is_tensor_like",
    "is_tensor_method_or_property",
    "wrap_torch_function",
    "enable_reentrant_dispatch",
    "get_buffer",
]
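
# Illustrative sketch (not part of the original module): the typical pattern by
# which a Python-level torch API function defers to ``__torch_function__``
# implementations via ``has_torch_function`` and ``handle_torch_function``.
# ``my_public_op`` is a hypothetical example, not a real torch function.
#
#     def my_public_op(input, other):
#         if has_torch_function((input, other)):
#             return handle_torch_function(my_public_op, (input, other), input, other)
#         return torch.add(input, other)
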

@functools.lru_cache(None)
def get_ignored_functions() -> Set[Callable]:
    """
    Return public functions that cannot be overridden by ``__torch_function__``.

    Returns
    -------
    Set[Callable]
        A set of functions that are publicly available in the torch API but cannot
        be overridden with ``__torch_function__``. Mostly this is because none of the
        arguments of these functions are tensors or tensor-likes.

    Examples
    --------
    >>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions()
    True
    >>> torch.add in torch.overrides.get_ignored_functions()
    False
    """
    Tensor = torch.Tensor
    return {
        torch.typename,
        torch.is_tensor,
        torch.is_storage,
        torch.set_default_tensor_type,
        torch.set_default_device,
        torch.set_rng_state,
        torch.get_rng_state,
        torch.manual_seed,
        torch.initial_seed,
        torch.seed,
        torch.save,
        torch.load,
        torch.set_printoptions,
        torch.fork,
        torch.get_default_dtype,
        torch.get_num_interop_threads,
        torch.get_num_threads,
        torch.init_num_threads,
        torch.import_ir_module,
        torch.import_ir_module_from_buffer,
        torch.is_anomaly_enabled,
        torch.is_anomaly_check_nan_enabled,
        torch.is_grad_enabled,
        torch.merge_type_from_type_comment,
        torch.parse_ir,
        torch.parse_schema,
        torch.parse_type_comment,
        torch.set_anomaly_enabled,
        torch.set_flush_denormal,
        torch.set_num_interop_threads,
        torch.set_num_threads,
        torch.wait,
        torch.as_tensor,
        torch.from_numpy,
        torch.get_device,
        torch.tensor,
        torch.default_generator,
        torch.has_cuda,
        torch.has_cudnn,
        torch.has_lapack,
        torch.device,
        torch.dtype,
        torch.finfo,
        torch.has_mkl,
        torch.has_mps,
        torch.has_mkldnn,
        torch.has_openmp,
        torch.iinfo,
        torch.memory_format,
        torch.qscheme,
        torch.set_grad_enabled,
        torch.no_grad,
        torch.enable_grad,
        torch.inference_mode,
        torch.is_inference_mode_enabled,
        torch.layout,
        torch.align_tensors,
        torch.arange,
        torch.as_strided,
        torch.bartlett_window,
        torch.blackman_window,
        torch.broadcast_shapes,
        torch.can_cast,
        torch.compile,
        torch.cudnn_affine_grid_generator,
        torch.cudnn_batch_norm,
        torch.cudnn_convolution,
        torch.cudnn_convolution_transpose,
        torch.cudnn_convolution_relu,
        torch.cudnn_convolution_add_relu,
        torch.cudnn_grid_sampler,
        torch.cudnn_is_acceptable,
        torch.empty,
        torch.empty_strided,
        torch.empty_quantized,
        torch.eye,
        torch.fft.fftfreq,
        torch.fft.rfftfreq,
        torch.from_file,
        torch.full,
        torch.fill,
        torch.hamming_window,
        torch.hann_window,
        torch.kaiser_window,
        torch.linspace,
        torch.logspace,
        torch.mkldnn_adaptive_avg_pool2d,
        torch.mkldnn_convolution,
        torch.mkldnn_max_pool2d,
        torch.mkldnn_max_pool3d,
        torch.mkldnn_linear_backward_weights,
        torch.mkldnn_rnn_layer,
        torch.normal,
        torch.ones,
        torch.promote_types,
        torch.rand,
        torch.randn,
        torch.randint,
        torch.randperm,
        torch.range,
        torch.result_type,
        torch.scalar_tensor,
        torch.sparse_coo_tensor,
        torch.sparse_compressed_tensor,
        torch.sparse_csr_tensor,
        torch.sparse_csc_tensor,
        torch.sparse_bsr_tensor,
        torch.sparse_bsc_tensor,
        torch.sym_float,
        torch.sym_int,
        torch.sym_max,
        torch.sym_min,
        torch.sym_not,
        torch.tril_indices,
        torch.triu_indices,
        torch.vander,
        torch.zeros,
        torch._jit_internal.boolean_dispatch,
        torch.nn.functional.assert_int_or_pair,
        torch.nn.functional.upsample,
        torch.nn.functional.upsample_bilinear,
        torch.nn.functional.upsample_nearest,
        torch.nn.functional.has_torch_function,
        torch.nn.functional.has_torch_function_unary,
        torch.nn.functional.has_torch_function_variadic,
        torch.nn.functional.handle_torch_function,
        torch.nn.functional.sigmoid,
        torch.nn.functional.hardsigmoid,
        torch.nn.functional.tanh,
        torch.nn.functional._canonical_mask,
        torch.nn.functional._none_or_dtype,
        # Doesn't actually take or return tensor arguments
        torch.nn.init.calculate_gain,
        # These are deprecated; don't test them
        torch.nn.init.uniform,
        torch.nn.init.normal,
        torch.nn.init.constant,
        torch.nn.init.eye,
        torch.nn.init.dirac,
        torch.nn.init.xavier_uniform,
        torch.nn.init.xavier_normal,
        torch.nn.init.kaiming_uniform,
        torch.nn.init.kaiming_normal,
        torch.nn.init.orthogonal,
        torch.nn.init.sparse,
        torch.nested.to_padded_tensor,
        has_torch_function,
        handle_torch_function,
        torch.set_autocast_enabled,
        torch.is_autocast_enabled,
        torch.clear_autocast_cache,
        torch.set_autocast_cpu_enabled,
        torch.is_autocast_cpu_enabled,
        torch.set_autocast_cpu_dtype,
        torch.get_autocast_cpu_dtype,
        torch.get_autocast_gpu_dtype,
        torch.set_autocast_gpu_dtype,
        torch.autocast_increment_nesting,
        torch.autocast_decrement_nesting,
        torch.is_autocast_cache_enabled,
        torch.set_autocast_cache_enabled,
        torch.nn.functional.hardswish,
        torch.is_vulkan_available,
        torch.are_deterministic_algorithms_enabled,
        torch.use_deterministic_algorithms,
        torch.is_deterministic_algorithms_warn_only_enabled,
        torch.set_deterministic_debug_mode,
        torch.get_deterministic_debug_mode,
        torch.set_float32_matmul_precision,
        torch.get_float32_matmul_precision,
        torch.unify_type_list,
        torch.is_warn_always_enabled,
        torch.set_warn_always,
        torch.vitals_enabled,
        torch.set_vital,
        torch.read_vitals,
        torch.vmap,
        torch.frombuffer,
        torch.asarray,
        Tensor.__delitem__,
        Tensor.__dir__,
        Tensor.__getattribute__,
        Tensor.__init__,
        Tensor.__iter__,
        Tensor.__init_subclass__,
        Tensor.__delattr__,
        Tensor.__setattr__,
        Tensor.__torch_function__,
        Tensor.__torch_dispatch__,
        Tensor.__new__,
        Tensor.__class__,
        Tensor.__subclasshook__,
        Tensor.__hash__,
        Tensor.as_subclass,
        Tensor.eig,
        Tensor.lstsq,
        Tensor.reinforce,
        Tensor.new,
        Tensor.new_tensor,
        Tensor.new_empty,
        Tensor.new_empty_strided,
        Tensor.new_zeros,
        Tensor.new_ones,
        Tensor.new_full,
        Tensor._make_subclass,
        Tensor.solve,
        Tensor.symeig,
        Tensor.stride,
        Tensor.unflatten,
        Tensor.to_sparse_coo,
        Tensor.to_sparse_csr,
        Tensor.to_sparse_csc,
        Tensor.to_sparse_bsr,
        Tensor.to_sparse_bsc,
        Tensor._typed_storage,
        Tensor._reduce_ex_internal,
        Tensor._fix_weakref,
        Tensor._view_func,
        Tensor._make_wrapper_subclass,
        Tensor._python_dispatch.__get__,
        Tensor._has_symbolic_sizes_strides.__get__,
        Tensor._conj,
        Tensor._conj_physical,
        Tensor._neg_view,
        Tensor._is_zerotensor,
        Tensor._is_all_true,
        Tensor._is_any_true,
        Tensor._addmm_activation,
        Tensor.to_padded_tensor,
    }
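
# Illustrative sketch (not part of the original module): ``get_ignored_functions``
# is typically consulted when auditing ``__torch_function__`` coverage, e.g. to
# skip callables that can never be overridden. The disjointness shown below is
# the expected relationship with ``get_overridable_functions``.
#
#     >>> ignored = torch.overrides.get_ignored_functions()
#     >>> overridable = {f for fns in torch.overrides.get_overridable_functions().values() for f in fns}
#     >>> ignored & overridable
#     set()
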

@functools.lru_cache(None)
def get_default_nowrap_functions() -> Set[Callable]:
    """
    Return public functions that do not wrap in a subclass when invoked by
    the default ``Tensor.__torch_function__`` that preserves subclasses. Typically,
    these functions represent field accesses (i.e., retrieving a Tensor that
    is stored somewhere on the Tensor) as opposed to computation. Users of
    these functions expect object identity to be preserved over multiple accesses
    (e.g., ``a.grad is a.grad``) which cannot be upheld if we're wrapping on
    the fly every time (furthermore, the tensor stored here might already be
    the subclass, in which case wrapping really ought not to happen).

    Not ALL property accessors have this property; for example ``Tensor.T`` actually
    just creates a new transposed tensor on the fly, and so we SHOULD interpose on
    these calls (you need to check the implementation of the function to see if
    this is the case or not). Additionally, if a property accessor doesn't return a Tensor,
    it doesn't have to be on this list (though it is harmless if it is).
    """
    Tensor = torch.Tensor
    return {
        Tensor._base.__get__,
        Tensor.grad.__get__,
        Tensor._grad.__get__,
    }
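
# Illustrative sketch (not part of the original module): a wrapper subclass can
# consult ``get_default_nowrap_functions`` inside its ``__torch_function__`` so
# that field accesses such as ``t.grad`` keep their identity instead of being
# re-wrapped on every access. ``MyTensor`` is hypothetical.
#
#     class MyTensor(torch.Tensor):
#         @classmethod
#         def __torch_function__(cls, func, types, args=(), kwargs=None):
#             ret = super().__torch_function__(func, types, args, kwargs or {})
#             if func in torch.overrides.get_default_nowrap_functions():
#                 # field access: return the stored tensor unchanged so that
#                 # ``t.grad is t.grad`` continues to hold
#                 return ret
#             # ... otherwise post-process / wrap ``ret`` as the subclass needs
#             return ret
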

@functools.lru_cache(None)
def get_testing_overrides() -> Dict[Callable, Callable]:
    """Return a dict containing dummy overrides for all overridable functions

    Returns
    -------
    Dict[Callable, Callable]
        A dictionary that maps overridable functions in the PyTorch API to
        lambda functions that have the same signature as the real function
        and unconditionally return -1. These lambda functions are useful
        for testing API coverage for a type that defines ``__torch_function__``.

    Examples
    --------
    >>> import inspect
    >>> my_add = torch.overrides.get_testing_overrides()[torch.add]
    >>> inspect.signature(my_add)
    <Signature (input, other, out=None)>
    """
    # Every function in the PyTorch API that can be overridden needs an entry
    # in this dict.
    #
    # Optimally we would use inspect to get the function signature and define
    # the lambda function procedurally but that is blocked by generating
    # function signatures for native kernels that can be consumed by inspect.
    # See Issue #28233.
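    #
    # Illustrative sketch (not part of the original module): the dummy overrides
    # can back a minimal ``__torch_function__`` implementation when testing API
    # coverage, similar in spirit to test/test_overrides.py. ``CoverageTensor``
    # is hypothetical.
    #
    #     >>> overrides = torch.overrides.get_testing_overrides()
    #     >>> class CoverageTensor:
    #     ...     @classmethod
    #     ...     def __torch_function__(cls, func, types, args=(), kwargs=None):
    #     ...         return overrides[func](*args, **(kwargs or {}))
    #     >>> torch.add(CoverageTensor(), CoverageTensor())
    #     -1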
    Tensor = torch.Tensor
    ret: Dict[Callable, Callable] = {
        torch.abs: lambda input, out=None: -1,
        torch.absolute: lambda input, out=None: -1,
        torch.adaptive_avg_pool1d: lambda input, output_size: -1,
        torch.adaptive_max_pool1d: lambda inputs, output_size: -1,
        torch.acos: lambda input, out=None: -1,
        torch.adjoint: lambda input: -1,
        torch.arccos: lambda input, out=None: -1,
        torch.acosh: lambda input, out=None: -1,
        torch.arccosh: lambda input, out=None: -1,
        torch.add: lambda input, other, out=None: -1,
        torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
        torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,
        torch.addcmul: lambda input, tensor1, tensor2, value=1, out=None: -1,
        torch.addmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
        torch.addmv: lambda input, mat, vec, beta=1, alpha=1, out=None: -1,
        torch.addr: lambda input, vec1, vec2, beta=1, alpha=1, out=None: -1,
        torch.affine_grid_generator: lambda theta, size, align_corners: -1,
        torch.all: lambda input, dim=None: -1,
        torch.allclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
        torch.alpha_dropout: lambda input, p, train, inplace=False: -1,
        torch.amax: lambda input, dim=None: -1,
        torch.amin: lambda input, dim=None: -1,
        torch.aminmax: lambda input, dim=None, keepdim=False, out=None: -1,
        torch.angle: lambda input, out=None: -1,
        torch.any: lambda input, dim=None, keepdim=False, out=None: -1,
        torch.argmax: lambda input: -1,
        torch.argmin: lambda input: -1,
        torch.argsort: lambda input, dim=None: -1,
        torch.asin: lambda input, out=None: -1,
        torch._assert_async: lambda input: -1,
        torch.arcsin: lambda input, out=None: -1,
        torch.asinh: lambda input, out=None: -1,
        torch.arcsinh: lambda input, out=None: -1,
        torch.atan: lambda input, out=None: -1,
        torch.arctan: lambda input, out=None: -1,
        torch.atan2: lambda input, other, out=None: -1,
        torch.arctan2: lambda input, other, out=None: -1,
        torch.atanh: lambda input, out=None: -1,
        torch.arctanh: lambda input, out=None: -1,
        torch.atleast_1d: lambda *tensors: -1,
        torch.atleast_2d: lambda *tensors: -1,
        torch.atleast_3d: lambda *tensors: -1,
        torch.avg_pool1d: lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True: -1,
        torch.baddbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
        torch.batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled: -1,
        torch.batch_norm_backward_elemt: lambda grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count_tensor: -1,
        torch.batch_norm_backward_reduce: lambda grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g: -1,
        torch.batch_norm_elemt: lambda input, weight, bias, mean, invstd, eps: -1,
        torch.batch_norm_gather_stats: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
        torch.batch_norm_gather_stats_with_counts: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
        torch.batch_norm_stats: lambda input, eps: -1,
        torch.batch_norm_update_stats: lambda input, running_mean, running_var, momentum: -1,
        torch.bernoulli: lambda input, generator=None, out=None: -1,
        torch.bilinear: lambda input1, input2, weight, bias: -1,
        torch.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None, reduce=None,
            reduction='mean', pos_weight=None: -1),
        torch.bincount: lambda input, weights=None, minlength=0: -1,
        torch.binomial: lambda count, prob, generator=None: -1,
        torch.bitwise_and: lambda input, other, out=None: -1,
        torch.bitwise_not: lambda input, out=None: -1,
        torch.bitwise_or: lambda input, other, out=None: -1,
        torch.bitwise_xor: lambda input, other, out=None: -1,
        torch.bitwise_left_shift: lambda input, other, out=None: -1,
        torch.bitwise_right_shift: lambda input, other, out=None: -1,
        torch.block_diag: lambda *tensors: -1,
        torch.bmm: lambda input, mat2, out=None: -1,
        torch.broadcast_tensors: lambda *tensors: -1,
        torch.broadcast_to: lambda self, size: -1,
        torch.bucketize: lambda input, boundaries, out_int32=False, right=False, out=None: -1,
        torch.cartesian_prod: lambda *tensors: -1,
        torch.cat: lambda tensors, dim=0, out=None: -1,
        torch.concat: lambda tensors, dim=0, out=None: -1,  # alias for torch.cat
        torch.concatenate: lambda tensors, dim=0, out=None: -1,  # alias for torch.cat
        torch.cdist: lambda x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary': -1,
        torch.ceil: lambda input, out=None: -1,
        torch.celu: lambda input, alpha=1., inplace=False: -1,
        torch.chain_matmul: lambda *matrices, out=None: -1,
        torch.channel_shuffle: lambda input, groups: -1,
        torch.cholesky: lambda input, upper=False, out=None: -1,
        torch.linalg.cholesky: lambda input, out=None: -1,
        torch.linalg.cholesky_ex: lambda input, check_errors=False, out=None: -1,
        torch.cholesky_inverse: lambda input, upper=False, out=None: -1,
        torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1,
        torch.choose_qparams_optimized: lambda input, numel, n_bins, ratio, bit_width: -1,
        torch.chunk: lambda input, chunks, dim=0: -1,
        torch.clamp: lambda input, min=None, max=None, out=None: -1,
        torch.clip: lambda input, min=None, max=None, out=None: -1,
        torch.clamp_min: lambda input, min, out=None: -1,
        torch.clamp_max: lambda input, max, out=None: -1,
        torch.column_stack: lambda tensors, out=None: -1,
        torch.cov: lambda input, correction=1, fweights=None, aweights=None: -1,
        torch.clone: lambda input: -1,
        torch.combinations: lambda input, r=2, with_replacement=False: -1,
        torch.complex: lambda real, imag: -1,
        torch.copysign: lambda input, other, out=None: -1,
        torch.polar: lambda abs, ang: -1,
        torch.linalg.cond: lambda input, ord=None: -1,
        torch.conj: lambda input, out=None: -1,
        torch.conj_physical: lambda input, out=None: -1,
        torch.resolve_conj: lambda input, out=None: -1,
        torch.resolve_neg: lambda input, out=None: -1,
        torch.constant_pad_nd: lambda input, pad, value=0: -1,
        torch.conv1d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
        torch.conv2d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
        torch.conv3d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
        torch.convolution: lambda input, weight, bias, stride, padding, dilation, transposed, output_padding, groups: -1,
        torch.conv_tbc: lambda input, weight, bias, pad=0: -1,
        torch.conv_transpose1d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
        torch.conv_transpose2d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
        torch.conv_transpose3d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
        torch.corrcoef: lambda input: -1,
        torch.cos: lambda input, out=None: -1,
        torch.cosine_embedding_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,
        torch.cosh: lambda input, out=None: -1,
        torch.cosine_similarity: lambda x1, x2, dim=1, eps=1e-8: -1,
        torch.count_nonzero: lambda input: -1,
        torch.cross: lambda input, other, dim=None, out=None: -1,
        torch.linalg.cross: lambda input, other, dim=-1, out=None: -1,
        torch.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean',
            zero_infinity=False: -1),
        torch.cummax: lambda input, dim, out=None: -1,
        torch.cummin: lambda input, dim, out=None: -1,
        torch.cumprod: lambda input, dim, out=None, dtype=None: -1,
        torch.cumsum: lambda input, dim, out=None, dtype=None: -1,
        torch.cumulative_trapezoid: lambda y, x=None, dim=-1: -1,
        torch.logcumsumexp: lambda input, dim, out=None: -1,
        torch.deg2rad: lambda input, out=None: -1,
        torch.dequantize: lambda input: -1,
        torch.det: lambda input: -1,
        torch.linalg.det: lambda input: -1,  # alias for torch.det  # type: ignore[attr-defined]
        torch.detach: lambda input: -1,
        torch.diag: lambda input, diagonal=0, out=None: -1,
        torch.diag_embed: lambda input, diagonal=0, out=None: -1,
        torch.diagflat: lambda input, offset=0: -1,
        torch.diff: lambda input, n=1, dim=-1, prepend=None, append=None, out=None: -1,
        torch.diagonal: lambda input, offset=0, dim1=0, dim2=1: -1,
        torch.linalg.diagonal: lambda input, offset=0, dim1=-2, dim2=-1: -1,
        torch.diagonal_scatter: lambda input, src, offset=0, dim1=0, dim2=1: -1,
        torch.as_strided_scatter: lambda self, src, size, stride, storage_offset=None: -1,
        torch.digamma: lambda input, out=None: -1,
        torch.dist: lambda input, other, p=2: -1,
        torch.div: lambda input, other, rounding_mode=None, out=None: -1,
        torch.divide: lambda input, other, rounding_mode=None, out=None: -1,
        torch.dot: lambda input, other, out=None: -1,
        torch.dropout: lambda input, p, train, inplace=False: -1,
        torch.dsmm: lambda input, mat2: -1,
        torch.hsmm: lambda mat1, mat2: -1,
        torch.dsplit: lambda input, indices_or_sections: -1,
        torch.dstack: lambda tensors, out=None: -1,
        torch.linalg.eig: lambda input, out=None: -1,
        torch.linalg.eigvals: lambda input, out=None: -1,
        torch.linalg.eigh: lambda input, UPLO="L", out=None: -1,
        torch.linalg.eigvalsh: lambda input, UPLO="L", out=None: -1,
        torch.einsum: lambda equation, *operands: -1,
        torch.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
            sparse=False: -1),
        torch.embedding_bag: (lambda input, weight, offsets, max_norm=None, norm_type=2, scale_grad_by_freq=False,
            mode='mean', sparse=False, per_sample_weights=None, padding_idx=None: -1),
        torch.empty_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
        torch.eq: lambda input, other, out=None: -1,
        torch.equal: lambda input, other: -1,
        torch.erf: lambda input, out=None: -1,
        torch.erfc: lambda input, out=None: -1,
        torch.erfinv: lambda input, out=None: -1,
        torch.exp: lambda input, out=None: -1,
        torch.exp2: lambda input, out=None: -1,
        torch.expm1: lambda input, out=None: -1,
        torch.fake_quantize_per_channel_affine: lambda input, scale, zero_point, axis, quant_min, quant_max: -1,
        torch.fake_quantize_per_tensor_affine: lambda input, scale, zero_point, quant_min, quant_max: -1,
        torch.fused_moving_avg_obs_fake_quant: (lambda x, observer_on, fake_quant_on, averaging_const, running_min,
            running_max, scale, zero_point, quant_min, quant_max, ch_axis,
            per_row_fake_quant=False, symmetric_quant=False: -1),
        torch.fbgemm_linear_fp16_weight: lambda input, packed_weight, bias: -1,
        torch.fbgemm_linear_fp16_weight_fp32_activation: lambda input, packed_weight, bias: -1,
        torch.fbgemm_linear_int8_weight: lambda input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias: -1,
        torch.fbgemm_linear_int8_weight_fp32_activation: (lambda input, weight, packed, col_offsets, weight_scale,
            weight_zero_point, bias: -1),
        torch.fbgemm_linear_quantize_weight: lambda input: -1,
        torch.fbgemm_pack_gemm_matrix_fp16: lambda input: -1,
        torch.fbgemm_pack_quantized_matrix: lambda input, a, b: -1,
        torch.feature_alpha_dropout: lambda input, p, train: -1,
        torch.feature_dropout: lambda input, p, train: -1,
        torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.ifft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.rfft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.irfft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.hfft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.ihfft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.hfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.ihfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.hfftn: lambda input, s=None, dim=-1, norm=None: -1,
        torch.fft.ihfftn: lambda input, s=None, dim=-1, norm=None: -1,
        torch.fft.fftn: lambda input, s=None, dim=None, norm=None: -1,
        torch.fft.ifftn: lambda input, s=None, dim=None, norm=None: -1,
        torch.fft.rfftn: lambda input, s=None, dim=None, norm=None: -1,
        torch.fft.irfftn: lambda input, s=None, dim=None, norm=None: -1,
        torch.fft.fft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.ifft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.rfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.irfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.fftshift: lambda input, dim=None: -1,
        torch.fft.ifftshift: lambda input, dim=None: -1,
        torch.fix: lambda input, out=None: -1,
        torch.flatten: lambda input, start_dim=0, end_dim=-1: -1,
        torch.flip: lambda input, dims: -1,
        torch.fliplr: lambda input: -1,
        torch.flipud: lambda input: -1,
        torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1,
        torch.floor: lambda input, out=None: -1,
        torch.floor_divide: lambda input, other: -1,
        torch.float_power: lambda input, exponent, out=None: -1,
        torch.fmod: lambda input, other, out=None: -1,
        torch.frac: lambda input, out=None: -1,
        torch.frexp: lambda input, out=None: -1,
        torch.full_like: lambda input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
        torch.lu_unpack: lambda LU_data, LU_pivots, unpack_data=True, unpack_pivots=True: -1,
        torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1,
        torch.gcd: lambda input, other, out=None: -1,
        torch.ge: lambda input, other, out=None: -1,
        torch.greater_equal: lambda input, other, out=None: -1,
        torch.geqrf: lambda input, out=None: -1,
        torch.i0: lambda input, out=None: -1,
        torch.inner: lambda input, other, out=None: -1,
        torch.outer: lambda input, vec2, out=None: -1,
        torch.ger: lambda input, vec2, out=None: -1,  # alias for torch.outer
        torch.gradient: lambda input, spacing=None, dim=None, edge_order=1: -1,
        torch.grid_sampler: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
        torch.grid_sampler_2d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
        torch.grid_sampler_3d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
        torch.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
        torch.gru: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
        torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
        torch.gt: lambda input, other, out=None: -1,
        torch.greater: lambda input, other, out=None: -1,
        torch.hardshrink: lambda input, lambd=0.5: -1,
        torch.heaviside: lambda input, values, out=None: -1,
        torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, reduction='mean': -1,
        torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1,
        torch.histogram: lambda input, bins=100, min=None, max=None, weight=None, density=False, out=None: -1,
        torch.histogramdd: lambda input, bins, range=None, weight=None, density=False: -1,
        torch.linalg.householder_product: lambda input, tau: -1,
        torch.hspmm: lambda mat1, mat2, out=None: -1,
        torch.hsplit: lambda input, indices_or_sections: -1,
        torch.hstack: lambda tensors, out=None: -1,
        torch.hypot: lambda input, other, out=None: -1,
        torch.igamma: lambda input, other, out=None: -1,
        torch.igammac: lambda input, other, out=None: -1,
        torch.imag: lambda input, out=None: -1,
        torch.index_add: lambda input, dim, index, source: -1,
        torch.index_copy: lambda input, dim, index, source: -1,
        torch.index_put: lambda input, indices, values, accumulate=False: -1,
        torch.index_select: lambda input, dim, index, out=None: -1,
        torch.index_fill: lambda input, dim, index, value: -1,
        torch.index_reduce: lambda input, dim, index, source, reduce, include_input=True: -1,
        torch.isfinite: lambda tensor: -1,
        torch.isin: lambda e, te, assume_unique=False, invert=False: -1,
        torch.isinf: lambda tensor: -1,
        torch.isreal: lambda tensor: -1,
        torch.isposinf: lambda input, out=None: -1,
        torch.isneginf: lambda input, out=None: -1,
        torch.instance_norm: (lambda input, running_mean, running_var, weight, bias, use_input_stats, momentum, eps,
            cudnn_enabled: -1),
        torch.int_repr: lambda input: -1,
        torch.inverse: lambda input, out=None: -1,
        torch.linalg.inv: lambda input, out=None: -1,
        torch.linalg.inv_ex: lambda input, check_errors=False, out=None: -1,
        torch.is_complex: lambda input: -1,
        torch.is_conj: lambda input: -1,
        torch.is_neg: lambda input: -1,
        torch.is_distributed: lambda input: -1,
        torch.is_inference: lambda input: -1,
        torch.is_floating_point: lambda input: -1,
        torch.is_nonzero: lambda input: -1,
        torch.is_same_size: lambda input, other: -1,
        torch.is_signed: lambda input: -1,
        torch.isclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
        torch.isnan: lambda input: -1,
        torch.istft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
            normalized=False, onesided=None, length=None, return_complex=False: -1),
        torch.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
        torch.kron: lambda input, other: -1,
        torch.kthvalue: lambda input, k, dim=None, keepdim=False, out=None: -1,
        torch.linalg.ldl_factor_ex: lambda input, hermitian=False, check_errors=False, out=None: -1,
        torch.linalg.ldl_factor: lambda input, hermitian=False, out=None: -1,
        torch.linalg.ldl_solve: lambda LD, pivots, B, hermitian=False, out=None: -1,
        torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
        torch.lcm: lambda input, other, out=None: -1,
        torch.ldexp: lambda input, other, out=None: -1,
        torch.le: lambda input, other, out=None: -1,
        torch.less_equal: lambda input, other, out=None: -1,
        torch.lerp: lambda input, end, weight, out=None: -1,
        torch.lgamma: lambda input, out=None: -1,
        torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest=None, method=None,
            tracker=None, ortho_iparams=None, ortho_fparams=None, ortho_bparams=None: -1,
        torch.log: lambda input, out=None: -1,
        torch.log_softmax: lambda input, dim, dtype=None: -1,
        torch.log10: lambda input, out=None: -1,
        torch.log1p: lambda input, out=None: -1,
        torch.log2: lambda input, out=None: -1,
        torch.logaddexp: lambda input, other, out=None: -1,
        torch.logaddexp2: lambda input, other, out=None: -1,
        torch.logdet: lambda input: -1,
        torch.xlogy: lambda x, y, out=None: -1,
        torch.logical_and: lambda input, other, out=None: -1,
        torch.logical_not: lambda input, out=None: -1,
        torch.logical_or: lambda input, other, out=None: -1,
        torch.logical_xor: lambda input, other, out=None: -1,
        torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
        torch.logit: lambda input, eps=None: -1,
        torch.lstm: lambda data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional: -1,
        torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
        torch.lt: lambda input, other, out=None: -1,
        torch.less: lambda input, other, out=None: -1,
        torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1,
        torch.lu_solve: lambda b, LU_data, LU_pivots, out=None: -1,
        torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,  # type: ignore[attr-defined]  # noqa: B950
        torch.masked_fill: lambda input, mask, value: -1,
        torch.masked_scatter: lambda input, mask, source: -1,
        torch.masked_select: lambda input, mask, out=None: -1,
        torch.matmul: lambda input, other, out=None: -1,
        torch.linalg.lu: lambda input, pivot=True, out=None: -1,
        torch.linalg.lu_factor: lambda input, pivot=True, out=None: -1,
        torch.linalg.lu_factor_ex: lambda input, pivot=True, check_errors=False, out=None: -1,
        torch.linalg.lu_solve: lambda LU, pivots, B, left=True, adjoint=False, out=None: -1,
        torch.linalg.matmul: lambda input, other, out=None: -1,  # alias for torch.matmul
        torch.matrix_power: lambda input, n: -1,
        torch.linalg.matrix_power: lambda input, n, out=None: -1,
        torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1,
        torch.linalg.multi_dot: lambda tensors, out=None: -1,
        torch.matrix_exp: lambda input: -1,
        torch.linalg.matrix_exp: lambda input: -1,
        torch.max: lambda input, out=None: -1,
        torch.maximum: lambda input, other, out=None: -1,
        torch.fmax: lambda input, other, out=None: -1,
        torch.max_pool1d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
        torch.max_pool2d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
        torch.max_pool3d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
        torch.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
            return_indices=False, ceil_mode=False: -1),
        torch.mean: lambda input, dim=None: -1,
        torch.nanmean: lambda input, dim=None, keepdim=False, dtype=None, out=None: -1,
        torch.median: lambda input, dim=None: -1,
        torch.nanmedian: lambda input, dim=None: -1,
        torch.meshgrid: lambda *tensors, **kwargs: -1,
        torch.min: lambda input, out=None: -1,
        torch.minimum: lambda input, other, out=None: -1,
        torch.fmin: lambda input, other, out=None: -1,
        torch.miopen_batch_norm: (lambda input, weight, bias, running_mean, running_var, training,
            exponential_average_factor, epsilon: -1),
        torch.miopen_convolution: lambda input, weight, bias, padding, stride, dilation, groups, benchmark, deterministic: -1,
        torch.miopen_convolution_add_relu: lambda input, weight, z, alpha, bias, stride, padding, dilation, groups: -1,
        torch.miopen_convolution_relu: lambda input, weight, bias, stride, padding, dilation, groups: -1,
        torch.miopen_convolution_transpose: (lambda input, weight, bias, padding, output_padding, stride, dilation,
            groups, benchmark, deterministic: -1),
        torch.miopen_depthwise_convolution: (lambda input, weight, bias, padding, stride, dilation, groups, benchmark,
            deterministic: -1),
        torch.miopen_rnn: (lambda input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first,
            dropout, train, bidirectional, batch_sizes, dropout_state: -1),
        torch.mm: lambda input, mat2, out=None: -1,
        torch.mode: lambda input, dim=-1, keepdim=False, out=None: -1,
        torch.movedim: lambda input, source, destination: -1,
        torch.moveaxis: lambda input, source, destination: -1,
        torch.msort: lambda input, descending=False, out=None: -1,
        torch.mul: lambda input, other, out=None: -1,
        torch.multiply: lambda input, other, out=None: -1,
        torch.multinomial: lambda input, num_samples, replacement=False, out=None: -1,
        torch.mv: lambda input, vec, out=None: -1,
        torch.mvlgamma: lambda input, p: -1,
        torch.narrow: lambda input, dim, start, length: -1,
        torch.narrow_copy: lambda input, dim, start, length: -1,
        torch.nan_to_num: lambda input, nan=0.0, posinf=None, neginf=None, out=None: -1,
        torch.native_batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps: -1,
        torch._native_batch_norm_legit: lambda input, weight, bias, training, momentum, eps: -1,
        torch.native_dropout: lambda input, p, train: -1,
        torch.native_layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
        torch.native_group_norm: lambda input, weight, bias, N, C, HxW, group, eps: -1,
        torch.native_norm: lambda input, p=2: -1,
        torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
        torch.native_channel_shuffle: lambda input, groups: -1,
        torch.ne: lambda input, other, out=None: -1,
        torch.not_equal: lambda input, other, out=None: -1,
        torch.neg: lambda input, out=None: -1,
        torch.negative: lambda input, out=None: -1,
        torch.nextafter: lambda input, other, out=None: -1,
        torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1,
        torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1,
        torch.nn.functional.adaptive_max_pool1d: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool1d_with_indices: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool2d: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool2d_with_indices: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool3d: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool3d_with_indices: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.affine_grid: lambda theta, size, align_corners=None: -1,
        torch.nn.functional.alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
        torch.nn.functional.avg_pool2d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
            count_include_pad=True, divisor_override=None: -1),
        torch.nn.functional.avg_pool3d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
            count_include_pad=True, divisor_override=None: -1),
        torch.nn.functional.batch_norm: (lambda input, running_mean, running_var, weight=None, bias=None, training=False,
            momentum=0.1, eps=1e-05: -1),
        torch.nn.functional.bilinear: lambda input1, input2, weight, bias=None: -1,
        torch.nn.functional.binary_cross_entropy: (lambda input, target, weight=None, size_average=None, reduce=None,
            reduction="mean": -1),
        torch.nn.functional.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None,
            reduce=None, reduction="mean", pos_weight=None: -1),
        torch.nn.functional.celu: lambda input, alpha=1.0, inplace=False: -1,
        torch.nn.functional.cosine_embedding_loss: (lambda input1, input2, target, margin=0, size_average=None,
            reduce=None, reduction='mean': -1),
        torch.nn.functional.cross_entropy: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
            reduce=None, reduction="mean", label_smoothing=0.0: -1),
        torch.nn.functional.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0,
            reduction='mean', zero_infinity=False: -1),
        torch.nn.functional.dropout: lambda input, p=0.5, training=True, inplace=False: -1,
        torch.nn.functional.dropout1d: lambda input, p=0.5, training=True, inplace=False: -1,
        torch.nn.functional.dropout2d: lambda input, p=0.5, training=True, inplace=False: -1,
        torch.nn.functional.dropout3d: lambda input, p=0.5, training=True, inplace=False: -1,
        torch.nn.functional.elu: lambda input, alpha=1.0, inplace=False: -1,
        torch.nn.functional.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0,
            scale_grad_by_freq=False, sparse=False: -1),
        torch.nn.functional.embedding_bag: (lambda input, weight, offsets=None, max_norm=None, norm_type=2,
            scale_grad_by_freq=False, mode='mean', sparse=False, per_sample_weights=None,
            include_last_offset=False, padding_idx=None: -1),
        torch.nn.functional.feature_alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
        torch.nn.functional.fold: lambda input, output_size, kernel_size, dilation=1, padding=0, stride=1: -1,
        torch.nn.functional.fractional_max_pool2d: (lambda input, kernel_size, output_size=None, output_ratio=None,
            return_indices=False, _random_samples=None: -1),
        torch.nn.functional.fractional_max_pool2d_with_indices: (
            lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
            _random_samples=None: -1),
        torch.nn.functional.fractional_max_pool3d: (lambda input, kernel_size, output_size=None, output_ratio=None,
            return_indices=False, _random_samples=None: -1),
        torch.nn.functional.fractional_max_pool3d_with_indices: (
            lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
            _random_samples=None: -1),
        torch.nn.functional.gaussian_nll_loss: lambda input, target, var, full=False, eps=1e-06, reduction='mean': -1,
        torch.nn.functional.gelu: lambda input, approximate='none': -1,
        torch.nn.functional.glu: lambda input, dim=-1: -1,
        torch.nn.functional.grid_sample: lambda input, grid, mode='bilinear', padding_mode='zeros', align_corners=None: -1,
        torch.nn.functional.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05: -1,
        torch.nn.functional.gumbel_softmax: lambda logits, tau=1, hard=False, eps=1e-10, dim=-1: -1,
        torch.nn.functional.hardshrink: lambda input, lambd=0.5: -1,
        torch.nn.functional.hardtanh: lambda input, min_val=-1., max_val=1., inplace=False: -1,
        torch.nn.functional.hinge_embedding_loss: (lambda input, target, margin=1.0, size_average=None, reduce=None,
            reduction='mean': -1),
        torch.nn.functional.instance_norm: (lambda input, running_mean=None, running_var=None, weight=None, bias=None,
            use_input_stats=True, momentum=0.1, eps=1e-05: -1),
        torch.nn.functional.interpolate: (lambda input, size=None, scale_factor=None, mode='nearest', align_corners=None,
            recompute_scale_factor=None, antialias=False: -1),
        torch.nn.functional.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
        torch.nn.functional.l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
        torch.nn.functional.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
        torch.nn.functional.leaky_relu: lambda input, negative_slope=0.01, inplace=False: -1,
        torch.nn.functional.linear: lambda input, weight, bias=None: -1,
        torch.nn.functional.local_response_norm: lambda input, size, alpha=0.0001, beta=0.75, k=1.0: -1,
        torch.nn.functional.log_softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
        torch.nn.functional.logsigmoid: lambda input: -1,
        torch.nn.functional.lp_pool1d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
        torch.nn.functional.lp_pool2d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
        torch.nn.functional.margin_ranking_loss: (lambda input1, input2, target, margin=0, size_average=None,
            reduce=None, reduction='mean': -1),
        torch.nn.functional.max_pool1d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
            ceil_mode=False, return_indices=False: -1),
        torch.nn.functional.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
            return_indices=False, ceil_mode=False: -1),
        torch.nn.functional.max_pool2d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
            ceil_mode=False, return_indices=False: -1),
        torch.nn.functional.max_pool2d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
            return_indices=False, ceil_mode=False: -1),
        torch.nn.functional.max_pool3d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
            return_indices=False, ceil_mode=False: -1),
        torch.nn.functional.max_pool3d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
            return_indices=False, ceil_mode=False: -1),
        torch.nn.functional.max_unpool1d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
        torch.nn.functional.max_unpool2d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
        torch.nn.functional.max_unpool3d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
        torch.nn.functional.mse_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
        torch.nn.functional.multi_head_attention_forward: (
            lambda query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v,
            add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training=True, key_padding_mask=None,
            need_weights=True, attn_mask=None, use_separate_proj_weight=False, q_proj_weight=None, k_proj_weight=None,
            v_proj_weight=None, static_k=None, static_v=None, average_attn_weights=None, is_causal=False: -1),
        torch.nn.functional.multi_margin_loss: (lambda input, target, p=1, margin=1.0, weight=None, size_average=None,
            reduce=None, reduction='mean': -1),
        torch.nn.functional.multilabel_margin_loss: (lambda input, target, size_average=None, reduce=None,
            reduction='mean': -1),
        torch.nn.functional.multilabel_soft_margin_loss: (lambda input, target, weight=None, size_average=None,
            reduce=None, reduction='mean': -1),
        torch.nn.functional.nll_loss: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
            reduce=None, reduction='mean': -1),
        torch.nn.functional.normalize: lambda input, p=2, dim=1, eps=1e-12, out=None: -1,
        torch.nn.functional.one_hot: lambda tensor, num_classes=-1: -1,
        torch.nn.functional.pad: lambda input, pad, mode='constant', value=0: -1,
        torch.nn.functional.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
        torch.nn.functional.poisson_nll_loss: (lambda input, target, log_input=True, full=False, size_average=None,
            eps=1e-08, reduce=None, reduction='mean': -1),
        torch.nn.functional.prelu: lambda input, weight: -1,
        torch.nn.functional.relu: lambda input, inplace=False: -1,
        torch.nn.functional.relu6: lambda input, inplace=False: -1,
        torch.nn.functional.rrelu: lambda input, lower=0.125, upper=0.3333333333333333, training=False, inplace=False: -1,
        torch.nn.functional.selu: lambda input, inplace=False: -1,
        torch.nn.functional.silu: lambda input, inplace=False: -1,
        torch.nn.functional.mish: lambda input, inplace=False: -1,
        torch.nn.functional.scaled_dot_product_attention: lambda query, key, value, attn_mask=None, dropout_p=0.0: -1,
        torch.nn.functional.smooth_l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean', beta=1.: -1,
        torch.nn.functional.huber_loss: lambda input, target, reduction='mean', delta=1.: -1,
        torch.nn.functional.soft_margin_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
        torch.nn.functional.softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
        torch.nn.functional.softmin: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
        torch.nn.functional.softplus: lambda input, beta=1, threshold=20: -1,
        torch.nn.functional.softshrink: lambda input, lambd=0.5: -1,
        torch.nn.functional.softsign: lambda input: -1,
        torch.nn.functional.tanhshrink: lambda input: -1,
        torch.nn.functional.threshold: lambda input, threshold, value, inplace=False: -1,
        torch.nn.functional.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06,
            swap=False, size_average=None, reduce=None, reduction='mean': -1),
        torch.nn.functional.triplet_margin_with_distance_loss: (lambda anchor, positive, negative, *,
            distance_function=None, margin=1.0,
            swap=False, reduction='mean': -1),
        torch.nn.functional.unfold: lambda input, kernel_size, dilation=1, padding=0, stride=1: -1,
        torch.nn.init.uniform_: lambda tensor, a=0., b=1.: -1,
        torch.nn.init.constant_: lambda tensor, val: -1,
        torch.nn.init.normal_: lambda tensor, mean=0., std=1.: -1,
        torch.nn.init.kaiming_uniform_: lambda tensor, a=0, mode='fan_in', nonlinearity='leaky_relu': -1,
        torch.nonzero: lambda input, as_tuple=False: -1,
        torch.argwhere: lambda input: -1,
        torch.norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
        torch.linalg.norm: lambda input, ord=None, dim=None, keepdim=False, out=None, dtype=None: -1,
        torch.linalg.vector_norm: lambda input, ord=2, dim=None, keepdim=False, out=None, dtype=None: -1,
        torch.linalg.matrix_norm: lambda input, ord='fro', dim=(-2, -1), keepdim=False, out=None, dtype=None: -1,
        torch.norm_except_dim: lambda v, pow=2, dim=0: -1,
        torch.nuclear_norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
        torch.numel: lambda input: -1,
        torch.orgqr: lambda input, tau: -1,
        torch.ormqr: lambda input, input2, input3, left=True, transpose=False: -1,
        torch.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
        torch.permute: lambda self, dim: -1,
        torch.pca_lowrank: lambda input, q=None, center=True, niter=2: -1,
        torch.pdist: lambda input, p=2: -1,
        torch.pinverse: lambda input, rcond=1e-15: -1,
        torch.linalg.pinv: lambda input, rcond=1e-15, hermitian=False: -1,
        torch.pixel_shuffle: lambda input, upscale_factor: -1,
        torch.pixel_unshuffle: lambda input, downscale_factor: -1,
        torch.poisson: lambda input, generator=None: -1,
        torch.poisson_nll_loss: lambda input, target, log_input, full, eps, reduction: -1,
        torch.polygamma: lambda input, n, out=None: -1,
        torch.positive: lambda input, out=None: -1,
        torch.prelu: lambda input, weight: -1,
        torch.ones_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
        torch.pow: lambda input, exponent, out=None: -1,
        torch.prod: lambda input, dtype=None: -1,
        torch.put: lambda input, index, source, accumulate=False: -1,
        torch.q_per_channel_axis: lambda input: -1,
        torch.q_per_channel_scales: lambda input: -1,
        torch.q_per_channel_zero_points: lambda input: -1,
        torch.q_scale: lambda input: -1,
        torch.q_zero_point: lambda input: -1,
        torch.qr: lambda input, some=True, out=None: -1,
        torch.linalg.qr: lambda input, mode='reduced', out=None: -1,
        torch.quantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1,
        torch.nanquantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1,
        torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1,
        torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1,
        torch.quantize_per_tensor_dynamic: lambda input, dtype, reduce_range: -1,
        torch.quantized_batch_norm: lambda input, weight, bias, mean, var, eps, output_scale, output_zero_point: -1,
        torch.quantized_gru_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
            col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
        torch.quantized_lstm_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
            col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
        torch.quantized_max_pool1d: (lambda input, kernel_size, stride=tuple(), padding=(0,),
            dilation=(1,), ceil_mode=False: -1),
        torch.quantized_max_pool2d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0),
            dilation=(1, 1), ceil_mode=False: -1),
        torch.quantized_rnn_relu_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
            col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
        torch.quantized_rnn_tanh_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
            col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
        torch.rad2deg: lambda input, out=None: -1,
  924. torch.rand_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
  925. torch.randint_like: lambda input, high, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
  926. torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
  927. torch.ravel: lambda input: -1,
  928. torch.real: lambda input, out=None: -1,
  929. torch.vdot: lambda input, other, out=None: -1,
  930. torch.linalg.vecdot: lambda input, other, dim=-1, out=None: -1,
  931. torch.view_as_real: lambda input: -1,
  932. torch.view_as_complex: lambda input: -1,
  933. torch.reciprocal: lambda input, out=None: -1,
  934. torch.relu: lambda input, inplace=False: -1,
  935. torch.remainder: lambda input, other, out=None: -1,
  936. torch.renorm: lambda input, p, dim, maxnorm, out=None: -1,
  937. torch.repeat_interleave: lambda input, dim=None: -1,
  938. torch.reshape: lambda input, shape: -1,
  939. torch.rnn_relu: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
  940. torch.rnn_relu_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
  941. torch.rnn_tanh: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
  942. torch.rnn_tanh_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
  943. torch.roll: lambda input, shifts, dims=None: -1,
  944. torch.rot90: lambda input, k=1, dims=(0, 1): -1,
  945. torch.round: lambda input, out=None: -1,
  946. torch.row_stack: lambda tensors, out=None: -1, # alias for torch.vstack
  947. torch._rowwise_prune: (lambda weight, mask, compressed_indices_dtype: -1),
  948. torch.rrelu: lambda input, lower=1. / 8, upper=1. / 3, training=False, inplace=False: -1,
  949. torch.rsqrt: lambda input, out=None: -1,
  950. torch.rsub: lambda input, other, alpha=1: -1,
  951. torch.saddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
  952. torch.scatter: lambda input, dim, index, src: -1,
  953. torch.scatter_add: lambda input, dim, index, src: -1,
  954. torch.scatter_reduce: lambda input, dim, index, src, reduce, include_self=True: -1,
  955. torch.searchsorted: lambda sorted_sequence, input, out_int32=False, right=False, out=None: -1,
  956. torch._segment_reduce: lambda data, reduce="max", lengths=None, indices=None, offsets=None, axis=0, unsafe=False: -1,
  957. torch.select: lambda input, dim, index: -1,
  958. torch.select_scatter: lambda input, src, dim, index: -1,
  959. torch.slice_scatter: lambda input, src, dim=0, start=None, end=None, step=1: -1,
  960. torch.selu: lambda input, inplace=False: -1,
  961. torch.sigmoid: lambda input, out=None: -1,
  962. torch.sign: lambda input, out=None: -1,
  963. torch.signbit: lambda input, out=None: -1,
  964. torch.sgn: lambda input, out=None: -1,
  965. torch.sin: lambda input, out=None: -1,
  966. torch.sinc: lambda input, out=None: -1,
  967. torch.sinh: lambda input, out=None: -1,
  968. torch.slogdet: lambda input: -1,
  969. torch.linalg.slogdet: lambda input: -1,
  970. torch.smm: lambda input, mat2: -1,
  971. torch.spmm: lambda input, mat2: -1,
  972. torch.softmax: lambda input, dim, dtype=None: -1,
  973. torch.linalg.solve: lambda A, B, left=True, out=None: -1,
  974. torch.linalg.solve_ex: lambda A, B, left=True, check_errors=False, out=None: -1,
  975. torch.sort: lambda input, dim=-1, descending=False, *, stable=False, out=None: -1,
  976. torch.split: lambda tensor, split_size_or_sections, dim=0: -1,
  977. torch.split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
  978. torch.sqrt: lambda input, out=None: -1,
  979. torch.square: lambda input, out=None: -1,
  980. torch.squeeze: lambda input, dim=None, out=None: -1,
  981. torch.sspaddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
  982. torch.stack: lambda tensors, dim=0, out=None: -1,
  983. torch.std: lambda input, dim=None: -1,
  984. torch.std_mean: lambda input, dim=None: -1,
  985. torch.stft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
  986. pad_mode='reflect', normalized=False, onesided=True, return_complex=None: -1),
  987. torch.sub: lambda input, other, out=None: -1,
  988. torch.subtract: lambda input, other, out=None: -1,
  989. torch.sum: lambda input, dim=None: -1,
  990. torch.nansum: lambda input, dim=None: -1,
  991. torch.svd: lambda input, some=True, compute_uv=True, out=None: -1,
  992. torch.svd_lowrank: lambda input, q=6, niter=2, M=None: -1,
  993. torch.linalg.svd: lambda input, full_matrices=True, out=None: -1,
  994. torch.linalg.svdvals: lambda input, out=None: -1,
  995. torch.swapaxes: lambda input, dim0, dim1: -1,
  996. torch.swapdims: lambda input, axis0, axis1: -1,
  997. torch.special.airy_ai: lambda input: -1,
  998. torch.special.bessel_j0: lambda input: -1,
  999. torch.special.bessel_j1: lambda input: -1,
  1000. torch.special.bessel_y0: lambda input: -1,
  1001. torch.special.bessel_y1: lambda input: -1,
  1002. torch.special.chebyshev_polynomial_t: lambda input, n, out=None: -1,
  1003. torch.special.chebyshev_polynomial_u: lambda input, n, out=None: -1,
  1004. torch.special.chebyshev_polynomial_v: lambda input, n, out=None: -1,
  1005. torch.special.chebyshev_polynomial_w: lambda input, n, out=None: -1,
  1006. torch.special.digamma: lambda input: -1,
  1007. torch.special.entr: lambda input: -1,
  1008. torch.special.erf: lambda input: -1,
  1009. torch.special.erfc: lambda input: -1,
  1010. torch.special.erfcx: lambda input: -1,
  1011. torch.special.erfinv: lambda input: -1,
  1012. torch.special.exp2: lambda input: -1,
  1013. torch.special.expit: lambda input: -1,
  1014. torch.special.expm1: lambda input: -1,
  1015. torch.special.gammainc: lambda input, other, out=None: -1,
  1016. torch.special.gammaincc: lambda input, other, out=None: -1,
  1017. torch.special.gammaln: lambda input: -1,
  1018. torch.special.hermite_polynomial_h: lambda input, n, out=None: -1,
  1019. torch.special.hermite_polynomial_he: lambda input, n, out=None: -1,
  1020. torch.special.i0: lambda input: -1,
  1021. torch.special.i0e: lambda input: -1,
  1022. torch.special.i1: lambda input: -1,
  1023. torch.special.i1e: lambda input: -1,
  1024. torch.special.laguerre_polynomial_l: lambda input, n, out=None: -1,
  1025. torch.special.legendre_polynomial_p: lambda input, n, out=None: -1,
  1026. torch.special.log1p: lambda input: -1,
  1027. torch.special.log_ndtr: lambda input: -1,
  1028. torch.special.log_softmax: lambda input, dim, dtype=None: -1,
  1029. torch.special.logit: lambda input: -1,
  1030. torch.special.logsumexp: lambda input, dim, keepdim=False, out=None: -1,
  1031. torch.special.modified_bessel_i0: lambda input: -1,
  1032. torch.special.modified_bessel_i1: lambda input: -1,
  1033. torch.special.modified_bessel_k0: lambda input: -1,
  1034. torch.special.modified_bessel_k1: lambda input: -1,
  1035. torch.special.multigammaln: lambda input, p: -1,
  1036. torch.special.ndtr: lambda input: -1,
  1037. torch.special.ndtri: lambda input: -1,
  1038. torch.special.polygamma: lambda input, n, out=None: -1,
  1039. torch.special.psi: lambda input: -1,
  1040. torch.special.round: lambda input: -1,
  1041. torch.special.scaled_modified_bessel_k0: lambda input: -1,
  1042. torch.special.scaled_modified_bessel_k1: lambda input: -1,
  1043. torch.special.shifted_chebyshev_polynomial_t: lambda input, n, out=None: -1,
  1044. torch.special.shifted_chebyshev_polynomial_u: lambda input, n, out=None: -1,
  1045. torch.special.shifted_chebyshev_polynomial_v: lambda input, n, out=None: -1,
  1046. torch.special.shifted_chebyshev_polynomial_w: lambda input, n, out=None: -1,
  1047. torch.special.sinc: lambda input: -1,
  1048. torch.special.softmax: lambda input, dim, dtype=None: -1,
  1049. torch.special.spherical_bessel_j0: lambda input: -1,
  1050. torch.special.xlog1py: lambda input, other, out=None: -1,
  1051. torch.special.xlogy: lambda input, other, out=None: -1,
  1052. torch.special.zeta: lambda self, other, out=None: -1,
  1053. torch.t: lambda input: -1,
  1054. torch.take: lambda input, index: -1,
  1055. torch.take_along_dim: lambda input, indices, dim=None, out=None: -1,
  1056. torch.tan: lambda input, out=None: -1,
  1057. torch.tanh: lambda input, out=None: -1,
  1058. torch.linalg.tensorinv: lambda a, ind=2: -1,
  1059. torch.linalg.tensorsolve: lambda a, b, dims=None: -1,
  1060. torch.tensordot: lambda a, b, dims=2, out=None: -1,
  1061. torch.tensor_split: lambda input, indices_or_sections, dim=0: -1,
  1062. torch.threshold: lambda input, threshold, value, inplace=False: -1,
  1063. torch.tile: lambda input, dims: -1,
  1064. torch.topk: lambda input, k, dim=-1, descending=False, out=None: -1,
  1065. torch.trace: lambda input: -1,
  1066. torch.transpose: lambda input, dim0, dim1: -1,
  1067. torch.trapz: lambda y, x=None, dim=-1: -1,
  1068. torch.trapezoid: lambda y, x=None, dim=-1: -1,
  1069. torch.triangular_solve: lambda input, A, upper=True, transpose=False, unitriangular=False: -1,
  1070. torch.linalg.solve_triangular: lambda input, B, upper, left=True, unitriangular=False: -1,
  1071. torch.tril: lambda input, diagonal=0, out=None: -1,
  1072. torch.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06, swap=False,
  1073. size_average=None, reduce=None, reduction='mean': -1),
  1074. torch.triu: lambda input, diagonal=0, out=None: -1,
  1075. torch.true_divide: lambda input, other: -1,
  1076. torch.trunc: lambda input, out=None: -1,
  1077. torch.unbind: lambda input, dim=0: -1,
  1078. torch.unflatten: lambda input, dim, sizes, names: -1,
  1079. torch.unique: lambda input, sorted=True, return_inverse=False, return_counts=False, dim=None: -1,
  1080. torch.unique_consecutive: lambda input, return_inverse=False, return_counts=False, dim=None: -1,
  1081. torch.unsafe_chunk: lambda input, chunks, dim=0: -1,
  1082. torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1,
  1083. torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
  1084. torch.unsqueeze: lambda input, dim, out=None: -1,
  1085. torch.linalg.vander: lambda x, N=None: -1,
  1086. torch.var: lambda input, dim=None: -1,
  1087. torch.var_mean: lambda input, dim=None: -1,
  1088. torch.vsplit: lambda input, indices_or_sections: -1,
  1089. torch.vstack: lambda tensors, out=None: -1,
  1090. torch.where: lambda condition, x=None, y=None: -1,
  1091. torch.zeros_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
  1092. torch._fw_primal_copy: lambda self, level: -1,
  1093. torch._make_dual_copy: lambda primal, tangent, level: -1,
  1094. torch.view_as_real_copy: lambda self: -1,
  1095. torch.view_as_complex_copy: lambda self: -1,
  1096. torch._conj_copy: lambda self: -1,
  1097. torch._neg_view_copy: lambda self: -1,
  1098. torch.as_strided_copy: lambda self, size, stride, storage_offset=None: -1,
  1099. torch._sparse_broadcast_to_copy: lambda self, size: -1,
  1100. torch.diagonal_copy: lambda self, offset=0, dim1=0, dim2=1: -1,
  1101. torch.expand_copy: lambda self, size, *, implicit=False: -1,
  1102. torch.narrow_copy: lambda self, dim, start, length: -1,
  1103. torch.permute_copy: lambda self, dims: -1,
  1104. torch._reshape_alias_copy: lambda self, size, stride: -1,
  1105. torch.select_copy: lambda self, dim, index: -1,
  1106. torch.detach_copy: lambda self: -1,
  1107. torch.slice_copy: lambda self, dim=0, start=None, end=None, step=1: -1,
  1108. torch.split_copy: lambda self, split_size, dim=0: -1,
  1109. torch.split_with_sizes_copy: lambda self, split_sizes, dim=0: -1,
  1110. torch.squeeze_copy: lambda self: -1,
  1111. torch.squeeze_copy: lambda self, dim: -1,
  1112. torch.t_copy: lambda self: -1,
  1113. torch.transpose_copy: lambda self, dim0, dim1: -1,
  1114. torch.unsqueeze_copy: lambda self, dim: -1,
  1115. torch._indices_copy: lambda self: -1,
  1116. torch._values_copy: lambda self: -1,
  1117. torch.indices_copy: lambda self: -1,
  1118. torch.values_copy: lambda self: -1,
  1119. torch.crow_indices_copy: lambda self: -1,
  1120. torch.col_indices_copy: lambda self: -1,
  1121. torch.ccol_indices_copy: lambda self: -1,
  1122. torch.row_indices_copy: lambda self: -1,
  1123. torch.unbind_copy: lambda self, dim=0: -1,
  1124. torch.view_copy: lambda self, size: -1,
  1125. torch.view_copy: lambda self, dtype: -1,
  1126. torch.unfold_copy: lambda self, dimension, size, step: -1,
  1127. torch.alias_copy: lambda self: -1,
  1128. Tensor.__floordiv__: lambda self, other: -1,
  1129. Tensor.__rfloordiv__: lambda self, other: -1,
  1130. Tensor.__ifloordiv__: lambda self, other: -1,
  1131. Tensor.__truediv__: lambda self, other: -1,
  1132. Tensor.__rtruediv__: lambda self, other: -1,
  1133. Tensor.__itruediv__: lambda self, other: -1,
  1134. Tensor.__lshift__: lambda self, other: -1,
  1135. Tensor.__rlshift__: lambda self, other: -1,
  1136. Tensor.__ilshift__: lambda self, other: -1,
  1137. Tensor.__rshift__: lambda self, other: -1,
  1138. Tensor.__rrshift__: lambda self, other: -1,
  1139. Tensor.__irshift__: lambda self, other: -1,
  1140. Tensor.__and__: lambda self, other: -1,
  1141. Tensor.__or__: lambda self, other: -1,
  1142. Tensor.__xor__: lambda self, other: -1,
  1143. Tensor.__float__: lambda self: -1,
  1144. Tensor.__complex__: lambda self: -1,
  1145. Tensor.__array__: lambda self, dtype: -1,
  1146. Tensor.__bool__: lambda self: -1,
  1147. Tensor.__contains__: lambda self, other: -1,
  1148. Tensor.__neg__: lambda self: -1,
  1149. Tensor.__invert__: lambda self: -1,
  1150. Tensor.__mod__: lambda self, other: -1,
  1151. Tensor.__rmod__: lambda self, other: -1,
  1152. Tensor.__imod__: lambda self, other: -1,
  1153. Tensor.__array_wrap__: lambda self, array: -1,
  1154. Tensor.__getitem__: lambda self, idx: -1,
  1155. Tensor.__deepcopy__: lambda self, memo: -1,
  1156. Tensor.__int__: lambda self: -1,
  1157. Tensor.__long__: lambda self: -1,
  1158. Tensor.__index__: lambda self: -1,
  1159. Tensor.__len__: lambda self: -1,
  1160. Tensor.__format__: lambda self, format_spec: -1,
  1161. Tensor.__reduce_ex__: lambda self, proto: -1,
  1162. Tensor.__reversed__: lambda self: -1,
  1163. Tensor.__repr__: lambda self, *, tensor_contents=None: -1,
  1164. Tensor.__setitem__: lambda self, k, v: -1,
  1165. Tensor.__setstate__: lambda self, d: -1,
  1166. Tensor.T.__get__: lambda self: -1,
  1167. Tensor.H.__get__: lambda self: -1,
  1168. Tensor.mT.__get__: lambda self: -1,
  1169. Tensor.mH.__get__: lambda self: -1,
  1170. Tensor._backward_hooks.__get__: lambda self: -1,
  1171. Tensor._base.__get__: lambda self: -1,
  1172. Tensor._cdata.__get__: lambda self: -1,
  1173. Tensor.grad.__get__: lambda self: -1,
  1174. Tensor._grad.__get__: lambda self: -1,
  1175. Tensor._grad_fn.__get__: lambda self: -1,
  1176. Tensor.grad_fn.__get__: lambda self: -1,
  1177. Tensor._version.__get__: lambda self: -1,
  1178. Tensor._autocast_to_reduced_precision: lambda self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype: -1,
  1179. Tensor._autocast_to_full_precision: lambda self, cuda_enabled, cpu_enabled: -1,
  1180. Tensor.data.__get__: lambda self: -1,
  1181. Tensor.device.__get__: lambda self: -1,
  1182. Tensor.dtype.__get__: lambda self: -1,
  1183. Tensor.is_cuda.__get__: lambda self: -1,
  1184. Tensor.is_cpu.__get__: lambda self: -1,
  1185. Tensor.is_xpu.__get__: lambda self: -1,
  1186. Tensor.is_ipu.__get__: lambda self: -1,
  1187. Tensor.is_leaf.__get__: lambda self: -1,
  1188. Tensor.retains_grad.__get__: lambda self: -1,
  1189. Tensor.is_meta.__get__: lambda self: -1,
  1190. Tensor.is_mps.__get__: lambda self: -1,
  1191. Tensor.is_nested.__get__: lambda self: -1,
  1192. Tensor.is_ort.__get__: lambda self: -1,
  1193. Tensor.is_mkldnn.__get__: lambda self: -1,
  1194. Tensor.is_quantized.__get__: lambda self: -1,
  1195. Tensor.is_sparse.__get__: lambda self: -1,
  1196. Tensor.is_sparse_csr.__get__: lambda self: -1,
  1197. Tensor.is_vulkan.__get__: lambda self: -1,
  1198. Tensor.layout.__get__: lambda self: -1,
  1199. Tensor.name.__get__: lambda self: -1,
  1200. Tensor.names.__get__: lambda self: -1,
  1201. Tensor.ndim.__get__: lambda self: -1,
  1202. Tensor.output_nr.__get__: lambda self: -1,
  1203. Tensor.requires_grad.__get__: lambda self: -1,
  1204. Tensor.shape.__get__: lambda self: -1,
  1205. Tensor.volatile.__get__: lambda self: -1,
  1206. Tensor.real.__get__: lambda self: -1,
  1207. Tensor.imag.__get__: lambda self: -1,
  1208. Tensor.__cuda_array_interface__.__get__: lambda self: -1,
  1209. Tensor.type: lambda self, dtype=None, non_blocking=False, **kwargs: -1,
  1210. Tensor._coalesced_: lambda self: -1,
  1211. Tensor._dimI: lambda self: -1,
  1212. Tensor._dimV: lambda self: -1,
  1213. Tensor._indices: lambda self: -1,
  1214. Tensor._is_view: lambda self: -1,
  1215. Tensor._nnz: lambda self: -1,
  1216. Tensor.crow_indices: lambda self: -1,
  1217. Tensor.col_indices: lambda self: -1,
  1218. Tensor.ccol_indices: lambda self: -1,
  1219. Tensor.row_indices: lambda self: -1,
  1220. Tensor._update_names: lambda self, names, inplace: -1,
  1221. Tensor._values: lambda self: -1,
  1222. Tensor.adjoint: lambda self: -1,
  1223. Tensor.align_as: lambda self, other: -1,
  1224. Tensor.align_to: lambda self, order, ellipsis_idx: -1,
  1225. Tensor.apply_: lambda self, callable: -1,
  1226. Tensor.as_strided: lambda self, size, stride: -1,
  1227. Tensor.as_strided_: lambda self, size, stride: -1,
  1228. Tensor.backward: lambda self, gradient=None, retain_graph=None, create_graph=False, inputs=None: -1,
  1229. Tensor.bfloat16: lambda self, memory_format=torch.preserve_format: -1,
  1230. Tensor.bool: lambda self, memory_format=torch.preserve_format: -1,
  1231. Tensor.byte: lambda self, memory_format=torch.preserve_format: -1,
  1232. Tensor.char: lambda self, memory_format=torch.preserve_format: -1,
  1233. Tensor.cauchy_: lambda self, median=0, sigma=1, *, generator=None: -1,
  1234. Tensor.coalesce: lambda self: -1,
  1235. Tensor._coalesced_: lambda self, coalesced: -1,
  1236. Tensor.contiguous: lambda self, memory_format=torch.contiguous_format: -1,
  1237. Tensor.copy_: lambda self, src, non_blocking=False: -1,
  1238. Tensor.cpu: lambda self, memory_format=torch.preserve_format: -1,
  1239. Tensor.cuda: lambda self, memory_format=torch.preserve_format: -1,
  1240. Tensor.xpu: lambda self, memory_format=torch.preserve_format: -1,
  1241. Tensor.ipu: lambda self, memory_format=torch.preserve_format: -1,
  1242. Tensor.data_ptr: lambda self: -1,
  1243. Tensor.dense_dim: lambda self: -1,
  1244. Tensor.diagonal_scatter: lambda self, src, offset=0, dim1=0, dim2=1: -1,
  1245. Tensor.dim: lambda self: -1,
  1246. Tensor.double: lambda self, memory_format=torch.preserve_format: -1,
  1247. Tensor.cdouble: lambda self, memory_format=torch.preserve_format: -1,
  1248. Tensor.element_size: lambda self: -1,
  1249. Tensor.expand: lambda self, size: -1,
  1250. Tensor.expand_as: lambda self, other: -1,
  1251. Tensor.exponential_: lambda self, lambd=1, *, generator=None: -1,
  1252. Tensor.fill_: lambda self, value: -1,
  1253. Tensor.fill_diagonal_: lambda self, value: -1,
  1254. Tensor.float: lambda self, memory_format=torch.preserve_format: -1,
  1255. Tensor.cfloat: lambda self, memory_format=torch.preserve_format: -1,
  1256. Tensor.geometric_: lambda self, p, *, generator=None: -1,
  1257. Tensor.get_device: lambda self: -1,
  1258. Tensor.half: lambda self, memory_format=torch.preserve_format: -1,
  1259. Tensor.chalf: lambda self, memory_format=torch.preserve_format: -1,
  1260. Tensor.has_names: lambda self: -1,
  1261. Tensor.indices: lambda self: -1,
  1262. Tensor.int: lambda self, memory_format=torch.preserve_format: -1,
  1263. Tensor.is_coalesced: lambda self: -1,
  1264. Tensor.is_contiguous: lambda self: -1,
  1265. Tensor.is_inference: lambda self: -1,
  1266. Tensor.is_pinned: lambda self: -1,
  1267. Tensor.is_set_to: lambda self, tensor: -1,
  1268. Tensor.is_shared: lambda self: -1,
  1269. Tensor.item: lambda self: -1,
  1270. Tensor.log_normal_: lambda self, mean=1, std=2, *, generator=None: -1,
  1271. Tensor.log_softmax: lambda self, dim: -1,
  1272. Tensor.long: lambda self, memory_format=torch.preserve_format: -1,
  1273. Tensor.map_: lambda self, tensor, callable: -1,
  1274. Tensor.map2_: lambda self, x, y, callable: -1,
  1275. Tensor.mm: lambda self, mat2: -1,
  1276. Tensor.narrow_copy: lambda self, dimension, start, length: -1,
  1277. Tensor.ndimension: lambda self: -1,
  1278. Tensor.nelement: lambda self: -1,
  1279. Tensor._nested_tensor_size: lambda self: -1,
  1280. Tensor._nested_tensor_strides: lambda self: -1,
  1281. Tensor.normal_: lambda self: -1,
  1282. Tensor.numpy: lambda self: -1,
  1283. Tensor.permute: lambda self, dim: -1,
  1284. Tensor.pin_memory: lambda self: -1,
  1285. Tensor.put_: lambda self, indices, tensor, accumulate=False: -1,
  1286. Tensor.qscheme: lambda self: -1,
  1287. Tensor.random_: lambda self, from_=0, to=None, *, generator=None: -1,
  1288. Tensor.record_stream: lambda self, stream: -1,
  1289. Tensor.refine_names: lambda self, names: -1,
  1290. Tensor.register_hook: lambda self, hook: -1,
  1291. Tensor.rename: lambda self, name: -1,
  1292. Tensor.repeat: lambda self, *size: -1,
  1293. Tensor.requires_grad_: lambda self, requires_grad=True: -1,
  1294. Tensor.reshape_as: lambda self, other: -1,
  1295. Tensor.resize: lambda self, *size: -1,
  1296. Tensor.resize_: lambda self, size: -1,
  1297. Tensor.resize_as: lambda self, other: -1,
  1298. Tensor.resize_as_sparse_: lambda self, other: -1,
  1299. Tensor.retain_grad: lambda self: -1,
  1300. Tensor.set_: lambda self, source=None, storage_offset=0, size=None, stride=None: -1,
  1301. Tensor.select_scatter: lambda self, src, dim, index: -1,
  1302. Tensor.share_memory_: lambda self: -1,
  1303. Tensor.short: lambda self, memory_format=torch.preserve_format: -1,
  1304. Tensor.size: lambda self: -1,
  1305. Tensor.slice_scatter: lambda self, src, dim=0, start=None, end=None, step=1: -1,
  1306. Tensor.sparse_dim: lambda self: -1,
  1307. Tensor.sparse_mask: lambda self, mask: -1,
  1308. Tensor.sparse_resize_: lambda self, size1, size2, dense_dim: -1,
  1309. Tensor.sparse_resize_and_clear_: lambda self, size1, size2, dense_dim: -1,
  1310. Tensor.sspaddmm: lambda self, mat1, mat2, beta=1, alpha=1, out=None: -1,
  1311. Tensor.storage: lambda self: -1,
  1312. Tensor.untyped_storage: lambda self: -1,
  1313. Tensor.storage_offset: lambda self: -1,
  1314. Tensor.storage_type: lambda self: -1,
  1315. Tensor.sum_to_size: lambda self, size: -1,
  1316. Tensor.tile: lambda self, *reps: -1,
  1317. Tensor.to: lambda self, dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format: -1,
  1318. Tensor.to_dense: lambda self, dtype=None: -1,
  1319. Tensor._to_dense: lambda self, dtype=None: -1,
  1320. Tensor.to_sparse: lambda self: -1,
  1321. Tensor.tolist: lambda self: -1,
  1322. Tensor.to_mkldnn: lambda self: -1,
  1323. Tensor.type_as: lambda self, other: -1,
  1324. Tensor.unfold: lambda self, dimension, size, step: -1,
  1325. Tensor.uniform_: lambda self, from_=0, to=1: -1,
  1326. Tensor.values: lambda self: -1,
  1327. Tensor.view: lambda self, shape: -1,
  1328. Tensor.view_as: lambda self, other: -1,
  1329. Tensor.zero_: lambda self: -1,
  1330. Tensor.__dlpack__: lambda self, stream=None: -1,
  1331. Tensor.__dlpack_device__: lambda self: -1,
  1332. torch.linalg.lstsq: lambda self, b, cond=None, driver=None: -1,
    }
    ret2 = {}
    ignored = get_ignored_functions()
    for k, v in ret.items():
        # Generate methods like __add__ and add_ by default from add
        names = [
            k.__name__,                  # Default method
            k.__name__ + "_",            # Inplace variant
            "__" + k.__name__ + "__",    # Dunder method
            "__i" + k.__name__ + "__",   # Inplace dunder method
            "__r" + k.__name__ + "__",   # Reverse dunder method
        ]
        if k.__name__.startswith("bitwise_"):
            # bitwise_<op> ops also expose dunder methods of the form __<op>__,
            # along with the __i<op>__ and __r<op>__ variants.
            subname = k.__name__[len("bitwise_"):]
            names.extend([
                "__" + subname + "__",
                "__i" + subname + "__",
                "__r" + subname + "__"
            ])
        for name in names:
            func = getattr(Tensor, name, None)
            if callable(func) and func not in ret and func not in ignored:
                ret2[func] = v
    ret.update(ret2)
    return ret
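
# Illustrative sketch (not part of the torch.overrides API): assuming the
# dictionary assembled above is what ``get_testing_overrides()`` returns (its
# name is used elsewhere in this module), every value is a lambda whose
# signature mirrors the overridable callable it stands in for, and calling it
# always yields -1. A test for a Tensor-like can therefore fetch a dummy
# implementation without building real inputs.
def _example_lookup_testing_override():
    overrides_map = get_testing_overrides()
    # torch.add is assumed to be one of the keys collected earlier in this function.
    dummy_add = overrides_map[torch.add]   # e.g. lambda input, other, out=None: -1
    return dummy_add(1, 2)                 # -> -1
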
def wrap_torch_function(dispatcher: Callable):
    """Wraps a given function with ``__torch_function__``-related functionality.

    Parameters
    ----------
    dispatcher: Callable
        A callable that returns an iterable of Tensor-likes passed into the function.

    Note
    ----
    This decorator may reduce the performance of your code. Generally, it's enough to express
    your code as a series of functions that, themselves, support __torch_function__. If you
    find yourself in the rare situation where this is not the case, e.g. if you're wrapping a
    low-level library and you also need it to work for Tensor-likes, then this function is available.

    Examples
    --------
    >>> def dispatcher(a):  # Must have the same signature as func
    ...     return (a,)
    >>> @torch.overrides.wrap_torch_function(dispatcher)
    ... def func(a):  # This will make func dispatchable by __torch_function__
    ...     return a + 0
    """
    def inner(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            relevant_args = dispatcher(*args, **kwargs)
            if has_torch_function(relevant_args):
                return handle_torch_function(wrapped, relevant_args, *args, **kwargs)
            return func(*args, **kwargs)

        return wrapped

    return inner
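
# Illustrative sketch (not part of the torch.overrides API): a minimal,
# hypothetical use of ``wrap_torch_function``. The dispatcher must mirror the
# wrapped function's signature and return the arguments that may carry a
# ``__torch_function__`` implementation; ``scaled_sum`` is a made-up name.
def _example_wrap_torch_function():
    def dispatcher(a, b):
        return (a, b)

    @wrap_torch_function(dispatcher)
    def scaled_sum(a, b):
        return (a + b) * 2

    # Exact tensors are non-dispatchable, so this runs the body directly.
    return scaled_sum(torch.ones(2), torch.ones(2))  # tensor([4., 4.])
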
def _get_overloaded_args(relevant_args: Iterable[Any]) -> List[Any]:
    """Returns a list of arguments on which to call __torch_function__.

    Checks arguments in relevant_args for __torch_function__ implementations,
    storing references to the arguments and their types in overloaded_args and
    overloaded_types in order of calling precedence. Only distinct types are
    considered. If a type is a subclass of another type it will have higher
    precedence, otherwise the precedence order is the same as the order of
    arguments in relevant_args, that is, from left-to-right in the argument list.

    The precedence-determining algorithm implemented in this function is
    described in `NEP-0018`_.

    See torch::append_overloaded_arg for the equivalent function in the C++
    implementation.

    Parameters
    ----------
    relevant_args : iterable of array-like
        Iterable of array-like arguments to check for __torch_function__
        methods.

    Returns
    -------
    overloaded_args : list
        Arguments from relevant_args on which to call __torch_function__
        methods, in the order in which they should be called.

    .. _NEP-0018:
       https://numpy.org/neps/nep-0018-array-function-protocol.html
    """
    # If torch function is not enabled, there are no overloaded types
    if not torch._C._is_torch_function_enabled():
        return []
    # Runtime is O(num_arguments * num_unique_types)
    overloaded_types: Set[Type] = set()
    overloaded_args: List[Any] = []
    for arg in relevant_args:
        arg_type = type(arg)
        # We only collect arguments if they have a unique type, which ensures
        # reasonable performance even with a long list of possibly overloaded
        # arguments.
        #
        # NB: Important to exclude _disabled_torch_function_impl, otherwise
        # https://github.com/pytorch/pytorch/issues/64687
        if (arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__') and
                arg_type.__torch_function__ != torch._C._disabled_torch_function_impl):
            # Create lists explicitly for the first type (usually the only one
            # done) to avoid setting up the iterator for overloaded_args.
            if overloaded_types:
                overloaded_types.add(arg_type)
                # By default, insert the argument at the end, but if it is a
                # subclass of another argument, insert it before that argument.
                # This ensures "subclasses before superclasses".
                index = len(overloaded_args)
                for i, old_arg in enumerate(overloaded_args):
                    if issubclass(arg_type, type(old_arg)):
                        index = i
                        break
                overloaded_args.insert(index, arg)
            else:
                overloaded_types = {arg_type}
                overloaded_args = [arg]
    return overloaded_args
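
# Illustrative sketch (not part of the torch.overrides API): the "subclasses
# before superclasses" rule above can be seen with two hypothetical
# Tensor-likes, where the subclass instance is asked first even though it
# appears later in ``relevant_args``.
def _example_overload_precedence():
    class Base:
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            return NotImplemented

    class Child(Base):
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            return "handled by Child"

    ordered = _get_overloaded_args([Base(), Child()])
    return [type(arg).__name__ for arg in ordered]  # ['Child', 'Base']
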
def handle_torch_function(
        public_api: Callable, relevant_args: Iterable[Any], *args, **kwargs) -> Any:
    """Implement a function with checks for ``__torch_function__`` overrides.

    See torch::autograd::handle_torch_function for the equivalent of this
    function in the C++ implementation.

    Arguments
    ---------
    public_api : function
        Function exposed by the public torch API originally called like
        ``public_api(*args, **kwargs)`` on which arguments are now being
        checked.
    relevant_args : iterable
        Iterable of arguments to check for __torch_function__ methods.
    args : tuple
        Arbitrary positional arguments originally passed into ``public_api``.
    kwargs : dict
        Arbitrary keyword arguments originally passed into ``public_api``.

    Returns
    -------
    object
        Result from calling ``implementation`` or an ``__torch_function__``
        method, as appropriate.

    Raises
    ------
    TypeError : if no implementation is found.

    Example
    -------
    >>> def func(a):
    ...     if has_torch_function_unary(a):
    ...         return handle_torch_function(func, (a,), a)
    ...     return a + 0
    """
    # Check for __torch_function__ methods.
    overloaded_args = _get_overloaded_args(relevant_args)
    # overloaded_args already have unique types.
    types = tuple(map(type, overloaded_args))

    # Check for __torch_function__ mode.
    if _is_torch_function_mode_enabled():
        # if we're here, the mode must be set to a TorchFunctionStackMode
        # this unsets it and calls directly into TorchFunctionStackMode's torch function
        with _pop_mode_temporarily() as mode:
            result = mode.__torch_function__(public_api, types, args, kwargs)
        if result is not NotImplemented:
            return result

    # Call overrides
    for overloaded_arg in overloaded_args:
        # This call needs to become a classmethod call in the future.
        # See https://github.com/pytorch/pytorch/issues/63767
        torch_func_method = overloaded_arg.__torch_function__
        if hasattr(torch_func_method, "__self__") and torch_func_method.__self__ is overloaded_arg and \
                torch_func_method is not torch._C._disabled_torch_function_impl:
            warnings.warn("Defining your `__torch_function__` as a plain method is deprecated and "
                          "will be an error in the future, please define it as a classmethod.",
                          DeprecationWarning)

        # Use `public_api` instead of `implementation` so __torch_function__
        # implementations can do equality/identity comparisons.
        result = torch_func_method(public_api, types, args, kwargs)

        if result is not NotImplemented:
            return result

    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    msg = (
        "no implementation found for '{}' on types that implement "
        '__torch_function__: {}'
    ).format(func_name, [type(arg) for arg in overloaded_args])
    if _is_torch_function_mode_enabled():
        msg += f" nor in mode {_get_current_function_mode()}"
    raise TypeError(msg)
has_torch_function = _add_docstr(
    _has_torch_function,
    r"""Check for __torch_function__ implementations in the elements of an iterable
    or if a __torch_function__ mode is enabled. Considers exact ``Tensor`` s
    and ``Parameter`` s non-dispatchable. Use this to guard a call to
    :func:`handle_torch_function`; don't use it to test if something
    is Tensor-like, use :func:`is_tensor_like` instead.

    Arguments
    ---------
    relevant_args : iterable
        Iterable of arguments to check for __torch_function__ methods.

    Returns
    -------
    bool
        True if any of the elements of relevant_args have __torch_function__
        implementations, False otherwise.

    See Also
    --------
    torch.is_tensor_like
        Checks if something is a Tensor-like, including an exact ``Tensor``.
    """
)

has_torch_function_unary = _add_docstr(
    _has_torch_function_unary,
    r"""Special case of `has_torch_function` for single inputs.

    Instead of:
      `has_torch_function((t,))`
    call:
      `has_torch_function_unary(t)`
    which skips unnecessary packing and unpacking work.
    """
)

has_torch_function_variadic = _add_docstr(
    _has_torch_function_variadic,
    r"""Special case of `has_torch_function` that skips tuple creation.

    This uses the METH_FASTCALL protocol introduced in Python 3.7.

    Instead of:
      `has_torch_function((a, b))`
    call:
      `has_torch_function_variadic(a, b)`
    which skips unnecessary packing and unpacking work.
    """
)
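
# Illustrative sketch (not part of the torch.overrides API): putting the guard
# helpers together. A user-defined function checks its argument with the
# cheapest applicable ``has_torch_function_*`` variant and defers to
# ``handle_torch_function`` when anything Tensor-like is present. ``clamp01``
# and ``Quantity`` are made-up names.
def _example_guarded_function():
    def clamp01(x):
        if has_torch_function_unary(x):
            return handle_torch_function(clamp01, (x,), x)
        return torch.clamp(x, 0.0, 1.0)

    class Quantity:
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            return f"handled by Quantity: {func.__name__}"

    plain = clamp01(torch.tensor([-1.0, 0.5, 2.0]))  # tensor([0.0, 0.5, 1.0])
    duck = clamp01(Quantity())                       # 'handled by Quantity: clamp01'
    return plain, duck
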
@functools.lru_cache(None)
def _get_overridable_functions() -> Tuple[Dict[Any, List[Callable]], Dict[Callable, str]]:
    overridable_funcs = collections.defaultdict(list)
    index = {}
    tested_namespaces = [
        ("torch", torch, torch.__all__),
        ("torch.functional", torch.functional, torch.functional.__all__),
        ("torch.nn.functional", torch.nn.functional, dir(torch.nn.functional)),
        ("torch.nn.init", torch.nn.init, dir(torch.nn.init)),
        ("torch.Tensor", torch.Tensor, dir(torch.Tensor)),
        ("torch.linalg", torch.linalg, dir(torch.linalg)),
        ("torch.fft", torch.fft, dir(torch.fft)),
        ("torch.special", torch.special, dir(torch.special)),
    ]
    for namespace_str, namespace, ns_funcs in tested_namespaces:
        for func_name in ns_funcs:
            ignore = False
            # ignore private functions or functions that are deleted in torch.__init__
            if namespace is not torch.Tensor:
                if func_name.startswith('__'):
                    continue
                elif func_name.startswith('_'):
                    ignore = True
                elif func_name.endswith('_'):
                    ignore = True
                elif not func_name[0].islower():
                    ignore = True
                elif func_name == 'unique_dim':
                    continue
            else:
                func = getattr(namespace, func_name)
                if getattr(object, func_name, None) == func:
                    continue
                if func_name == '__weakref__':
                    continue
            func = getattr(namespace, func_name)
            if namespace is torch.Tensor and getattr(object, func_name, None) == func:
                continue
            # ignore re-exported modules
            if isinstance(func, types.ModuleType):
                continue
            # ignore __future__ imports
            if isinstance(func, __future__._Feature):
                continue

            if not callable(func) and hasattr(func, "__get__"):
                index[func.__get__] = f"{namespace_str}.{func_name}.__get__"
                index[func.__set__] = f"{namespace_str}.{func_name}.__set__"
                if ignore:
                    continue
                if func.__get__ in get_ignored_functions():
                    msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
                           "but still has an explicit override")
                    assert func.__get__ not in get_testing_overrides(), msg.format(namespace, func.__name__)
                    continue
                else:
                    overridable_funcs[func].append(func.__get__)
                    continue

            if not callable(func):
                continue

            index[func] = f"{namespace_str}.{func_name}"

            if ignore:
                continue

            # cannot be overridden by __torch_function__
            if func in get_ignored_functions():
                msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
                       "but still has an explicit override")
                assert func not in get_testing_overrides(), msg.format(namespace, func.__name__)
                continue
            overridable_funcs[namespace].append(func)
    return overridable_funcs, index
def get_overridable_functions() -> Dict[Any, List[Callable]]:
    """List functions that are overridable via __torch_function__

    Returns
    -------
    Dict[Any, List[Callable]]
        A dictionary that maps namespaces that contain overridable functions
        to functions in that namespace that can be overridden.
    """
    return _get_overridable_functions()[0]
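
# Illustrative sketch (not part of the torch.overrides API):
# ``get_overridable_functions`` keys its result by the namespace object the
# functions live in (``torch``, ``torch.linalg``, ``torch.Tensor``, ...), so a
# coverage check for a custom Tensor-like typically iterates one bucket.
def _example_list_overridable():
    per_namespace = get_overridable_functions()
    linalg_funcs = per_namespace[torch.linalg]
    return torch.linalg.svd in linalg_funcs  # expected to be True
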
def resolve_name(f):
    """Get a human readable string name for a function passed to
    __torch_function__

    Arguments
    ---------
    f : Callable
        Function to resolve the name of.

    Returns
    -------
    str
        Name of the function; if eval'ed it should give back the input
        function.
    """
    if isinstance(f, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):
        return str(f)
    return _get_overridable_functions()[1].get(f)


@functools.lru_cache(None)
def _get_tensor_methods() -> Set[Callable]:
    """Returns a set of the overridable methods on ``torch.Tensor``"""
    overridable_funcs = get_overridable_functions()
    methods = set(overridable_funcs[torch.Tensor])
    return methods


def is_tensor_method_or_property(func: Callable) -> bool:
    """
    Returns True if the function passed in is a handler for a
    method or property belonging to ``torch.Tensor``, as passed
    into ``__torch_function__``.

    .. note::
       For properties, their ``__get__`` method must be passed in.

    This may be needed, in particular, for the following reasons:

    1. Methods/properties sometimes don't contain a `__module__` slot.
    2. They require that the first passed-in argument is an instance
       of ``torch.Tensor``.

    Examples
    --------
    >>> is_tensor_method_or_property(torch.Tensor.add)
    True
    >>> is_tensor_method_or_property(torch.add)
    False
    """
    return func in _get_tensor_methods() or func.__name__ == "__get__"


def is_tensor_like(inp):
    """
    Returns ``True`` if the passed-in input is a Tensor-like.

    Currently, this occurs whenever there's a ``__torch_function__``
    attribute on the type of the input.

    Examples
    --------
    A subclass of tensor is generally a Tensor-like.

    >>> class SubTensor(torch.Tensor): ...
    >>> is_tensor_like(SubTensor([0]))
    True

    Built-in or user types aren't usually Tensor-like.

    >>> is_tensor_like(6)
    False
    >>> is_tensor_like(None)
    False
    >>> class NotATensor: ...
    >>> is_tensor_like(NotATensor())
    False

    But, they can be made Tensor-like by implementing __torch_function__.

    >>> class TensorLike:
    ...     @classmethod
    ...     def __torch_function__(cls, func, types, args, kwargs):
    ...         return -1
    >>> is_tensor_like(TensorLike())
    True
    """
    return type(inp) is torch.Tensor or hasattr(type(inp), "__torch_function__")
class TorchFunctionMode:
    """
    A ``TorchFunctionMode`` allows you to override the meaning of all
    ``__torch_function__`` overridable functions within a dynamic scope,
    without having to actually create a tensor subclass or manually
    monkey-patch functions in the PyTorch API. Some common situations
    where you should use a mode:

    * You want to override the meaning of factory functions, or other
      functions that do not otherwise take a tensor as an argument
      (these cannot be overridden with tensor subclasses).

    * You want to override the behavior of all functions without needing
      to wrap your inputs in tensor subclasses; e.g., if you are just
      interested in logging intermediate computations.

    * You want to control the order of execution of various tensor
      subclasses explicitly, rather than implicitly via the return of
      ``NotImplemented``.

    Independent subclasses of :class:`TorchFunctionMode` are compositional:
    modes can be pushed onto a stack using ``with MyMode():``.
    When you call functions in the PyTorch API inside your
    ``__torch_function__`` implementation, by default, they will forward on to
    the next mode on the mode stack. If you want to recursively call back into
    your current ``__torch_function__`` implementation, either explicitly
    invoke ``self.__torch_function__(...)``, or use the context manager
    ``enable_torch_function_mode(self, replace=self.inner)`` to make the
    PyTorch API self-referential (beware of infinite loops, in this case!)
    """
    inner: "TorchFunctionMode"

    # Force metaclass to generate constructor at the base of the hierarchy
    def __init__(self):
        pass

    def __torch_function__(self, func, types, args=(), kwargs=None):
        raise NotImplementedError()

    def __enter__(self):
        _push_mode(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        _pop_mode()

    @classmethod
    def push(cls, *args, **kwargs):
        warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`")
        instance = cls(*args, **kwargs)
        return instance
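
# Illustrative sketch (not part of the torch.overrides API): a minimal mode in
# the spirit of the class docstring above. It logs every overridable call
# (including factory functions, which tensor subclasses cannot intercept) and
# then forwards it unchanged. ``PrintingMode`` is a made-up name.
def _example_torch_function_mode():
    class PrintingMode(TorchFunctionMode):
        def __torch_function__(self, func, types, args=(), kwargs=None):
            kwargs = kwargs or {}
            print(f"calling {resolve_name(func) or func}")
            # While this handler runs, the mode is not re-entered, so the call
            # forwards to the normal implementation (or the next mode on the stack).
            return func(*args, **kwargs)

    with PrintingMode():
        return torch.full((2,), 3.0)  # logs "calling torch.full", returns tensor([3., 3.])
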
def _get_current_function_mode():
    stack_len = _len_torch_function_stack()
    return _get_function_stack_at(stack_len - 1) if stack_len > 0 else None


def _get_current_function_mode_stack():
    stack_len = _len_torch_function_stack()
    return [_get_function_stack_at(i) for i in range(stack_len)]


def _push_mode(mode):
    _push_on_torch_function_stack(mode)


def _pop_mode():
    old = _pop_torch_function_stack()
    return old


@contextlib.contextmanager
def _pop_mode_temporarily():
    old = _pop_mode()
    try:
        yield old
    finally:
        _push_mode(old)


class BaseTorchFunctionMode(TorchFunctionMode):
    def __torch_function__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        return func(*args, **kwargs)


class enable_reentrant_dispatch():
    def __enter__(self):
        self._raii_guard = torch._C._RestorePythonTLSSnapshot()

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        del self._raii_guard


def get_buffer(tensor_subclass, data, prefix):
    import ctypes
    assert prefix in {"stride", "size", "sym_size"}
    buffer_name = f"_{prefix}_buffer"
    if not hasattr(tensor_subclass, buffer_name):
        SizeType = ctypes.c_longlong * len(data)
        setattr(tensor_subclass, buffer_name, SizeType(*data))
    ptr = ctypes.addressof(getattr(tensor_subclass, buffer_name))
    return (ptr, len(data))
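
# Illustrative sketch (not part of the torch.overrides API): ``get_buffer``
# caches a ctypes array of long longs on the given class under
# ``_<prefix>_buffer`` and hands back its address and length. A mechanical,
# hypothetical use with a stand-in class:
def _example_get_buffer():
    class _FakeSubclass:  # stand-in for a tensor subclass type
        pass

    ptr, length = get_buffer(_FakeSubclass, (2, 3, 4), "size")
    # Repeated calls return the same (pointer, length) pair because the buffer
    # is cached on the class.
    assert get_buffer(_FakeSubclass, (2, 3, 4), "size") == (ptr, length)
    return ptr, length  # (address of the cached buffer, 3)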