# NOTE(review): stripped non-Python residue that preceded the stub content —
# a "filename / 163 KB" listing header and long runs of concatenated line
# numbers left over from copying a rendered file listing.
# @generated from torch/_C/_VariableFunctions.pyi.in
import builtins
from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload, Iterator, NamedTuple, Sequence, Literal, TypeVar

import torch
from torch import Tensor, Generator, strided, memory_format, contiguous_format, inf
from torch.types import _int, _float, _bool, Number, _dtype, _device, _qscheme, _size, _layout, SymInt, Device
  7. @overload
  8. def __and__(input: Tensor, other: Tensor) -> Tensor: ...
  9. @overload
  10. def __and__(input: Tensor, other: Number) -> Tensor: ...
  11. @overload
  12. def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
  13. @overload
  14. def __lshift__(input: Tensor, other: Number) -> Tensor: ...
  15. @overload
  16. def __or__(input: Tensor, other: Tensor) -> Tensor: ...
  17. @overload
  18. def __or__(input: Tensor, other: Number) -> Tensor: ...
  19. @overload
  20. def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
  21. @overload
  22. def __rshift__(input: Tensor, other: Number) -> Tensor: ...
  23. @overload
  24. def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
  25. @overload
  26. def __xor__(input: Tensor, other: Number) -> Tensor: ...
  27. def _adaptive_avg_pool2d(input: Tensor, output_size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
  28. def _adaptive_avg_pool3d(input: Tensor, output_size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
  29. def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
  30. @overload
  31. def _add_relu(input: Tensor, other: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
  32. @overload
  33. def _add_relu(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
  34. @overload
  35. def _add_relu_(input: Tensor, other: Tensor, *, alpha: Number=1) -> Tensor: ...
  36. @overload
  37. def _add_relu_(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
  38. def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, use_gelu: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
  39. @overload
  40. def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
  41. @overload
  42. def _aminmax(input: Tensor, dim: _int, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
  43. def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ...
  44. def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
  45. def _assert_async(input: Tensor) -> None: ...
  46. def _assert_tensor_metadata(a: Tensor, size: Optional[_size]=None, stride: Optional[_size]=None, dtype: Optional[_dtype]=None) -> None: ...
  47. def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
  48. def _cast_Byte(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
  49. def _cast_Char(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
  50. def _cast_Double(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
  51. def _cast_Float(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
  52. def _cast_Half(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
  53. def _cast_Int(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
  54. def _cast_Long(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
  55. def _cast_Short(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
  56. def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool=False) -> Tuple[_float, _int]: ...
  57. def _chunk_grad_outputs_efficient_attention(query: Tensor, key: Tensor, value: Tensor, is_causal: _bool=False) -> _bool: ...
  58. def _coalesce(input: Tensor) -> Tensor: ...
  59. def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  60. def _conj(input: Tensor) -> Tensor: ...
  61. def _conj_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  62. def _conj_physical(input: Tensor) -> Tensor: ...
  63. def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
  64. def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool=False, transpose: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
  65. @overload
  66. def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size, groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
  67. @overload
  68. def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: Sequence[Union[_int, SymInt]], dilation: _size, transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
  69. def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: str, dilation: _size, groups: _int) -> Tensor: ...
  70. def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool=False) -> Tensor: ...
  71. def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ...
  72. @overload
  73. def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int=0, zero_infinity: _bool=False) -> Tuple[Tensor, Tensor]: ...
  74. @overload
  75. def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int=0, zero_infinity: _bool=False) -> Tuple[Tensor, Tensor]: ...
  76. @overload
  77. def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
  78. @overload
  79. def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
  80. def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  81. def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
  82. def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
  83. def _cufft_clear_plan_cache(device_index: _int) -> None: ...
  84. def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
  85. def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
  86. def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
  87. def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
  88. def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
  89. def _debug_has_internal_overlap(input: Tensor) -> _int: ...
  90. def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
  91. def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
  92. def _disable_functionalization(): ...
  93. @overload
  94. def _efficientzerotensor(size: _size, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  95. @overload
  96. def _efficientzerotensor(*size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  97. def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False, padding_idx: _int=-1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
  98. def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False, padding_idx: _int=-1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
  99. @overload
  100. def _empty_affine_quantized(size: _size, *, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  101. @overload
  102. def _empty_affine_quantized(*size: _int, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  103. @overload
  104. def _empty_per_channel_affine_quantized(size: _size, *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  105. @overload
  106. def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  107. def _enable_functionalization(*, reapply_views: _bool = False): ...
  108. def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
  109. def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float=1.0) -> Tensor: ...
  110. def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float=1.0) -> Tensor: ...
  111. def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ...
  112. def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
  113. def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
  114. def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
  115. def _foobar(input: Tensor, arg1: _bool=True, arg2: _bool=True, *, arg3: _bool=True) -> Tensor: ...
  116. def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  117. def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  118. def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  119. def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  120. @overload
  121. def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
  122. @overload
  123. def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
  124. @overload
  125. def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> List[Tensor]: ...
  126. @overload
  127. def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
  128. @overload
  129. def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
  130. @overload
  131. def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
  132. @overload
  133. def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
  134. @overload
  135. def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ...
  136. @overload
  137. def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> List[Tensor]: ...
  138. @overload
  139. def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
  140. @overload
  141. def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
  142. @overload
  143. def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
  144. @overload
  145. def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
  146. @overload
  147. def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ...
  148. @overload
  149. def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> List[Tensor]: ...
  150. @overload
  151. def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
  152. @overload
  153. def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
  154. @overload
  155. def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
  156. def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  157. def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  158. def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  159. def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  160. def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  161. def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  162. @overload
  163. def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
  164. @overload
  165. def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
  166. @overload
  167. def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  168. @overload
  169. def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
  170. @overload
  171. def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
  172. @overload
  173. def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  174. @overload
  175. def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
  176. @overload
  177. def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
  178. @overload
  179. def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  180. @overload
  181. def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
  182. @overload
  183. def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
  184. @overload
  185. def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  186. def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  187. def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  188. def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  189. def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  190. @overload
  191. def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
  192. @overload
  193. def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
  194. @overload
  195. def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  196. @overload
  197. def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
  198. @overload
  199. def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
  200. @overload
  201. def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  202. def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  203. def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  204. def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  205. def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  206. def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  207. def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  208. def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  209. def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  210. def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  211. def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  212. def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  213. def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  214. @overload
  215. def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Number) -> List[Tensor]: ...
  216. @overload
  217. def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  218. @overload
  219. def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Number) -> None: ...
  220. @overload
  221. def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  222. def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  223. def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  224. def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  225. def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  226. def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  227. def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  228. def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  229. def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  230. def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  231. def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  232. @overload
  233. def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
  234. @overload
  235. def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
  236. @overload
  237. def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  238. @overload
  239. def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
  240. @overload
  241. def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
  242. @overload
  243. def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
  244. @overload
  245. def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
  246. @overload
  247. def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
  248. @overload
  249. def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
# NOTE(review): signature-only stubs (every body is `...`); this file appears to
# be generated (presumably for torch._C._VariableFunctions) — keep signatures in
# sync with the native schema rather than hand-editing behavior.
# The _foreach_* ops take a tuple/list of Tensors; overloads cover the
# scalar / scalar-sequence / tensor-list right-hand-side variants. The
# trailing-underscore names return None (in-place by torch convention).
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Number=2) -> List[Tensor]: ...
def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> List[Tensor]: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
# Private helpers: functionalization, fused optimizer steps, quantization
# observers, histogram internals, and assorted internal checks.
def _from_functional_tensor(t: Tensor) -> Tensor: ...
def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor]=None, found_inf: Optional[Tensor]=None) -> None: ...
def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor]=None, found_inf: Optional[Tensor]=None) -> None: ...
def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator]=None) -> Tuple[Tensor, Tensor]: ...
def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool=False, symmetric_quant: _bool=False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ...
def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor]=None, dropout_p: _float=0.0, is_causal: _bool=False) -> _int: ...
def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> List[Tensor]: ...
def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> Tensor: ...
def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor]=None, density: _bool=False) -> Tensor: ...
def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False, unsafe: _bool=False) -> Tensor: ...
def _indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _is_all_true(input: Tensor) -> Tensor: ...
def _is_any_true(input: Tensor) -> Tensor: ...
def _is_functional_tensor(t: Tensor) -> _bool: ...
def _is_zerotensor(input: Tensor) -> _bool: ...
def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ...
  321. def _linalg_det(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types._linalg_det: ...
  322. def _linalg_eigh(A: Tensor, UPLO: str="L", compute_v: _bool=True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types._linalg_eigh: ...
  323. def _linalg_slogdet(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types._linalg_slogdet: ...
  324. def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool=True, check_errors: _bool=False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types._linalg_solve_ex: ...
  325. def _linalg_svd(A: Tensor, full_matrices: _bool=False, compute_uv: _bool=True, *, driver: Optional[str]=None, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types._linalg_svd: ...
# NOTE(review): signature-only stubs (bodies are `...`) for private softmax,
# dual-number, quantization and backend-specific (mkldnn/mps) helpers;
# presumably generated — keep in sync with the native schema.
def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor]=None) -> Tensor: ...
def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _lu_with_info(input: Tensor, pivot: _bool=True, check_errors: _bool=True) -> torch.return_types._lu_with_info: ...
def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int]=None, mask_type: Optional[_int]=None) -> Tensor: ...
def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, stride: _size, dilation: _size, groups: _int) -> Tensor: ...
def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int) -> Tensor: ...
  342. @overload
  343. def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
  344. @overload
  345. def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
# NOTE(review): signature-only stubs (bodies are `...`) for private attention,
# nested-tensor, packed-sequence and Sobol-engine helpers; presumably
# generated — keep in sync with the native schema.
def _native_decoder_only_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor]=None, incr_key: Optional[Tensor]=None, incr_value: Optional[Tensor]=None, need_weights: _bool=True, average_attn_weights: _bool=True) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor]=None, need_weights: _bool=True, average_attn_weights: _bool=True, mask_type: Optional[_int]=None) -> Tuple[Tensor, Tensor]: ...
def _neg_view(input: Tensor) -> Tensor: ...
def _neg_view_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool=False) -> Tensor: ...
def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ...
def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool=True) -> Tensor: ...
def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ...
def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=None) -> Tensor: ...
def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ...
def _nnpack_available() -> _bool: ...
def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Union[_int, _size]=1) -> Tensor: ...
def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Number, total_length: _int) -> Tuple[Tensor, Tensor]: ...
def _pin_memory(input: Tensor, device: Optional[Union[_device, str, None]]=None) -> Tensor: ...
def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ...
def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ...
def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None) -> Tensor: ...
def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ...
def _resize_output_(input: Tensor, size: _size, device: Union[_device, str, None]) -> Tensor: ...
def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ...
def _sample_dirichlet(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ...
def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor]=None, dropout_p: _float=0.0, is_causal: _bool=False, dropout_mask: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]: ...
def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, compute_log_sumexp: _bool, is_causal: _bool=False) -> Tuple[Tensor, Tensor]: ...
def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float=0.0, is_causal: _bool=False, return_debug_mask: _bool=False) -> torch.return_types._scaled_dot_product_flash_attention: ...
def _shape_as_tensor(input: Tensor) -> Tensor: ...
def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ...
def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ...
def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ...
def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ...
def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
  378. def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Tensor=None) -> Tensor: ...
# NOTE(review): signature-only stubs (bodies are `...`) for private sparse,
# transformer-fastpath, uniqueness and sparse-layout validation helpers;
# presumably generated — keep in sync with the native schema.
def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ...
def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor]=None) -> Tensor: ...
def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ...
def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def _standard_gamma(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ...
def _sync(t: Tensor) -> None: ...
@overload
def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ...
@overload
def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ...
def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ...
def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _test_check_tensor(input: Tensor) -> Tensor: ...
def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Number=1) -> Tensor: ...
def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _to_functional_tensor(t: Tensor) -> Tensor: ...
def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ...
def _transformer_decoder_only_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor]=None, incr_key: Optional[Tensor]=None, incr_value: Optional[Tensor]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor]=None, mask_type: Optional[_int]=None) -> Tensor: ...
def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int=1) -> Tensor: ...
def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor]=None) -> Tensor: ...
def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float=0.0) -> Tensor: ...
def _unique(input: Tensor, sorted: _bool=True, return_inverse: _bool=False) -> Tuple[Tensor, Tensor]: ...
def _unique2(input: Tensor, sorted: _bool=True, return_inverse: _bool=False, return_counts: _bool=False) -> Tuple[Tensor, Tensor, Tensor]: ...
def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ...
@overload
def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ...
@overload
def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ...
def _use_cudnn_rnn_flatten_weight() -> _bool: ...
def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ...
def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ...
def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _values_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _weight_norm(v: Tensor, g: Tensor, dim: _int=0) -> Tensor: ...
def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int=0) -> Tuple[Tensor, Tensor]: ...
# NOTE(review): public torch.* API stubs begin here (signature-only, bodies
# are `...`); presumably generated. The positional beta/alpha overloads of
# addbmm/addcdiv/addcmul/addmm/addmv/addr/baddbmm mirror a legacy calling
# convention kept for backward compatibility alongside the keyword form.
def abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def abs_(input: Tensor) -> Tensor: ...
def absolute(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acos_(input: Tensor) -> Tensor: ...
def acosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acosh_(input: Tensor) -> Tensor: ...
def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
@overload
def add(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def add(self: Tensor, alpha: Number, other: Tensor) -> Tensor: ...
@overload
def add(self: Tensor, alpha: Number, other: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addcdiv(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
@overload
def addcdiv(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addcmul(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
@overload
def addcmul(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def addmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def addmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addmv(beta: Number, self: Tensor, alpha: Number, mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addmv(beta: Number, self: Tensor, alpha: Number, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmv(beta: Number, self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addmv(beta: Number, self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def addmv_(beta: Number, self: Tensor, alpha: Number, mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addmv_(beta: Number, self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
@overload
def addr(beta: Number, self: Tensor, alpha: Number, vec1: Tensor, vec2: Tensor) -> Tensor: ...
@overload
def addr(beta: Number, self: Tensor, alpha: Number, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addr(beta: Number, self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor: ...
@overload
def addr(beta: Number, self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
def adjoint(input: Tensor) -> Tensor: ...
def affine_grid_generator(theta: Tensor, size: _size, align_corners: _bool) -> Tensor: ...
def alias_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def all(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def all(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def allclose(input: Tensor, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> _bool: ...
def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def amax(input: Tensor, dim: Union[_int, _size]=(), keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def amin(input: Tensor, dim: Union[_int, _size]=(), keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
  519. def aminmax(input: Tensor, *, dim: Optional[_int]=None, keepdim: _bool=False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.aminmax: ...
# NOTE(review): signature-only stubs (bodies are `...`) for public torch.*
# ops angle..binomial; presumably generated. The arange and bartlett_window
# overload sets enumerate the distinct native schemas (positional step vs
# factory-kwargs variants); `ellipsis` in dim annotations is the named-dim
# form valid in .pyi stubs.
def angle(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def any(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def any(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def arange(start: Number, end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def arange(end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def arange(end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def arange(start: Number, end: Number, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def arange(start: Number, end: Number, step: Number=1, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def arccos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arccos_(input: Tensor) -> Tensor: ...
def arccosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arccosh_(input: Tensor) -> Tensor: ...
def arcsin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arcsin_(input: Tensor) -> Tensor: ...
def arcsinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arcsinh_(input: Tensor) -> Tensor: ...
def arctan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arctan_(input: Tensor) -> Tensor: ...
def arctanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def arctanh_(input: Tensor) -> Tensor: ...
def argmax(input: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def argmin(input: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def argsort(input: Tensor, *, stable: _bool, dim: _int=-1, descending: _bool=False) -> Tensor: ...
@overload
def argsort(input: Tensor, dim: _int=-1, descending: _bool=False) -> Tensor: ...
@overload
def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool=False) -> Tensor: ...
def argwhere(input: Tensor) -> Tensor: ...
def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
def as_tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None) -> Tensor: ...
def asarray(obj: Any, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, copy: Optional[_bool]=None, requires_grad: _bool=False) -> Tensor: ...
def asin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def asin_(input: Tensor) -> Tensor: ...
def asinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def asinh_(input: Tensor) -> Tensor: ...
def atan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def atan_(input: Tensor) -> Tensor: ...
def atanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def atanh_(input: Tensor) -> Tensor: ...
def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, ceil_mode: _bool=False, count_include_pad: _bool=True) -> Tensor: ...
@overload
def baddbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def baddbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def baddbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def baddbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bartlett_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], mean_dy: Tensor, mean_dy_xmu: Tensor, count: Tensor) -> Tensor: ...
def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor]=None) -> Tensor: ...
def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ...
def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ...
def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ...
def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ...
@overload
def bernoulli(input: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator]=None) -> Tensor: ...
def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor]=None) -> Tensor: ...
def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, size_average: Optional[bool] = None, reduce: Optional[bool] = None, reduction: str = ..., pos_weight: Optional[Tensor] = None) -> Tensor: ...
def bincount(input: Tensor, weights: Optional[Tensor]=None, minlength: _int=0) -> Tensor: ...
def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
  607. @overload
  608. def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  609. @overload
  610. def bitwise_and(self: Number, other: Tensor) -> Tensor: ...
  611. @overload
  612. def bitwise_and(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  613. @overload
  614. def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  615. @overload
  616. def bitwise_left_shift(self: Number, other: Tensor) -> Tensor: ...
  617. @overload
  618. def bitwise_left_shift(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  619. def bitwise_not(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  620. @overload
  621. def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  622. @overload
  623. def bitwise_or(self: Number, other: Tensor) -> Tensor: ...
  624. @overload
  625. def bitwise_or(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  626. @overload
  627. def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  628. @overload
  629. def bitwise_right_shift(self: Number, other: Tensor) -> Tensor: ...
  630. @overload
  631. def bitwise_right_shift(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  632. @overload
  633. def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  634. @overload
  635. def bitwise_xor(self: Number, other: Tensor) -> Tensor: ...
  636. @overload
  637. def bitwise_xor(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  638. @overload
  639. def blackman_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  640. @overload
  641. def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  642. def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  643. def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
  644. @overload
  645. def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool=False, right: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
  646. @overload
  647. def bucketize(self: Number, boundaries: Tensor, *, out_int32: _bool=False, right: _bool=False) -> Tensor: ...
  648. def can_cast(from_: _dtype, to: _dtype) -> _bool: ...
  649. @overload
  650. def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
  651. @overload
  652. def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
  653. def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  654. def ceil(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  655. def ceil_(input: Tensor) -> Tensor: ...
  656. def celu(input: Tensor, alpha: Number=1.0) -> Tensor: ...
  657. def celu_(input: Tensor, alpha: Number=1.0) -> Tensor: ...
  658. def channel_shuffle(input: Tensor, groups: _int) -> Tensor: ...
  659. def cholesky(input: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
  660. def cholesky_inverse(input: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
  661. def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
  662. def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ...
  663. def chunk(input: Tensor, chunks: _int, dim: _int=0) -> List[Tensor]: ...
  664. @overload
  665. def clamp(input: Tensor, min: Optional[Tensor]=None, max: Optional[Tensor]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
  666. @overload
  667. def clamp(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
  668. @overload
  669. def clamp_(input: Tensor, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ...
  670. @overload
  671. def clamp_(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
  672. @overload
  673. def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  674. @overload
  675. def clamp_max(input: Tensor, max: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  676. @overload
  677. def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ...
  678. @overload
  679. def clamp_max_(input: Tensor, max: Number) -> Tensor: ...
  680. @overload
  681. def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  682. @overload
  683. def clamp_min(input: Tensor, min: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  684. @overload
  685. def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ...
  686. @overload
  687. def clamp_min_(input: Tensor, min: Number) -> Tensor: ...
  688. @overload
  689. def clip(input: Tensor, min: Optional[Tensor]=None, max: Optional[Tensor]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
  690. @overload
  691. def clip(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
  692. @overload
  693. def clip_(input: Tensor, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ...
  694. @overload
  695. def clip_(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
  696. def clone(input: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
  697. def col_indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  698. def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
  699. def combinations(input: Tensor, r: _int=2, with_replacement: _bool=False) -> Tensor: ...
  700. def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  701. @overload
  702. def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
  703. @overload
  704. def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
  705. @overload
  706. def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
  707. @overload
  708. def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
  709. def conj(input: Tensor) -> Tensor: ...
  710. def conj_physical(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  711. def conj_physical_(input: Tensor) -> Tensor: ...
  712. def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Number=0) -> Tensor: ...
  713. @overload
  714. def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
  715. @overload
  716. def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
  717. @overload
  718. def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
  719. @overload
  720. def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
  721. @overload
  722. def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
  723. @overload
  724. def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
  725. def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int=0) -> Tensor: ...
  726. def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
  727. def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
  728. def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
  729. def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: Sequence[Union[_int, SymInt]], dilation: _size, transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: _int) -> Tensor: ...
  730. @overload
  731. def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  732. @overload
  733. def copysign(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  734. def corrcoef(input: Tensor) -> Tensor: ...
  735. def cos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  736. def cos_(input: Tensor) -> Tensor: ...
  737. def cosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  738. def cosh_(input: Tensor) -> Tensor: ...
  739. def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float = ..., size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
  740. def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int=1, eps: _float=1e-08) -> Tensor: ...
  741. @overload
  742. def count_nonzero(input: Tensor, dim: Optional[_int]=None) -> Tensor: ...
  743. @overload
  744. def count_nonzero(input: Tensor, dim: _size) -> Tensor: ...
  745. def cov(input: Tensor, *, correction: _int=1, fweights: Optional[Tensor]=None, aweights: Optional[Tensor]=None) -> Tensor: ...
  746. def cross(input: Tensor, other: Tensor, dim: Optional[_int]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
  747. def crow_indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  748. def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: int = ..., reduction: str = ..., zero_infinity: bool = ...) -> Tensor: ...
  749. def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ...
  750. def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
  751. def cudnn_convolution(input: Tensor, weight: Tensor, padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
  752. def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Number], bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, groups: _int) -> Tensor: ...
  753. def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, groups: _int) -> Tensor: ...
  754. def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
  755. def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ...
  756. def cudnn_is_acceptable(input: Tensor) -> _bool: ...
  757. @overload
  758. def cummax(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.cummax: ...
  759. @overload
  760. def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.cummax: ...
  761. @overload
  762. def cummin(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.cummin: ...
  763. @overload
  764. def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.cummin: ...
  765. @overload
  766. def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  767. @overload
  768. def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  769. @overload
  770. def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  771. @overload
  772. def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  773. @overload
  774. def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int=-1) -> Tensor: ...
  775. @overload
  776. def cumulative_trapezoid(y: Tensor, *, dx: Number=1, dim: _int=-1) -> Tensor: ...
  777. def deg2rad(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  778. def deg2rad_(input: Tensor) -> Tensor: ...
  779. @overload
  780. def dequantize(input: Tensor) -> Tensor: ...
  781. @overload
  782. def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
  783. def det(input: Tensor) -> Tensor: ...
  784. def detach(input: Tensor) -> Tensor: ...
  785. def detach_(input: Tensor) -> Tensor: ...
  786. def detach_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  787. def diag(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
  788. def diag_embed(input: Tensor, offset: _int=0, dim1: _int=-2, dim2: _int=-1) -> Tensor: ...
  789. def diagflat(input: Tensor, offset: _int=0) -> Tensor: ...
  790. @overload
  791. def diagonal(input: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ...
  792. @overload
  793. def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int=0) -> Tensor: ...
  794. def diagonal_copy(input: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1, *, out: Optional[Tensor]=None) -> Tensor: ...
  795. def diagonal_scatter(input: Tensor, src: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ...
  796. def diff(input: Tensor, n: _int=1, dim: _int=-1, prepend: Optional[Tensor]=None, append: Optional[Tensor]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
  797. def digamma(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  798. def dist(input: Tensor, other: Tensor, p: Number=2) -> Tensor: ...
  799. def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor]=None) -> Tensor: ...
  800. @overload
  801. def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  802. @overload
  803. def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor]=None) -> Tensor: ...
  804. @overload
  805. def divide(input: Tensor, other: Number, *, rounding_mode: Optional[str]) -> Tensor: ...
  806. @overload
  807. def divide(input: Tensor, other: Number) -> Tensor: ...
  808. def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  809. def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
  810. def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
  811. def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
  812. @overload
  813. def dsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
  814. @overload
  815. def dsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
  816. def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
  817. def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt]=-1, scale_grad_by_freq: _bool=False, sparse: _bool=False) -> Tensor: ...
  818. @overload
  819. def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
  820. @overload
  821. def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
  822. def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ...
  823. @overload
  824. def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  825. @overload
  826. def empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  827. @overload
  828. def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  829. @overload
  830. def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  831. def empty_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  832. def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  833. def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  834. @overload
  835. def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  836. @overload
  837. def eq(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  838. def equal(input: Tensor, other: Tensor) -> _bool: ...
  839. def erf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  840. def erf_(input: Tensor) -> Tensor: ...
  841. def erfc(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  842. def erfc_(input: Tensor) -> Tensor: ...
  843. def erfinv(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  844. def exp(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  845. def exp2(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  846. def exp2_(input: Tensor) -> Tensor: ...
  847. def exp_(input: Tensor) -> Tensor: ...
  848. def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
  849. def expm1(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  850. def expm1_(input: Tensor) -> Tensor: ...
  851. @overload
  852. def eye(n: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  853. @overload
  854. def eye(n: _int, m: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  855. def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
  856. @overload
  857. def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
  858. @overload
  859. def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor: ...
  860. def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
  861. def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
  862. def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Number, weight_zero_point: Number, bias: Tensor) -> Tensor: ...
  863. def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Number, weight_zero_point: Number, bias: Tensor) -> Tensor: ...
  864. def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ...
  865. def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ...
  866. @overload
  867. def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ...
  868. @overload
  869. def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ...
  870. def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
  871. def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
  872. def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
  873. def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
  874. @overload
  875. def fill(input: Tensor, value: Tensor) -> Tensor: ...
  876. @overload
  877. def fill(input: Tensor, value: Number) -> Tensor: ...
  878. @overload
  879. def fill_(input: Tensor, value: Tensor) -> Tensor: ...
  880. @overload
  881. def fill_(input: Tensor, value: Number) -> Tensor: ...
  882. def fix(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  883. def fix_(input: Tensor) -> Tensor: ...
  884. @overload
  885. def flatten(input: Tensor, start_dim: _int=0, end_dim: _int=-1) -> Tensor: ...
  886. @overload
  887. def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: ...
  888. @overload
  889. def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
  890. @overload
  891. def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
  892. def flip(input: Tensor, dims: _size) -> Tensor: ...
  893. def fliplr(input: Tensor) -> Tensor: ...
  894. def flipud(input: Tensor) -> Tensor: ...
  895. @overload
  896. def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  897. @overload
  898. def float_power(self: Number, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  899. @overload
  900. def float_power(input: Tensor, exponent: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  901. def floor(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  902. def floor_(input: Tensor) -> Tensor: ...
  903. def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
  904. def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  905. def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  906. @overload
  907. def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  908. @overload
  909. def fmod(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  910. def frac(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  911. def frac_(input: Tensor) -> Tensor: ...
  912. def frexp(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.frexp: ...
  913. def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
  914. def from_file(filename: str, shared: Optional[_bool]=None, size: Optional[_int]=0, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  915. def from_numpy(ndarray) -> Tensor: ...
  916. def frombuffer(buffer: Any, *, dtype: _dtype, count: int=-1, offset: int=0, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
  917. @overload
  918. def full(size: _size, fill_value: Number, *, out: Optional[Tensor]=None, layout: _layout=strided, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
  919. @overload
  920. def full(size: _size, fill_value: Number, *, names: List[Union[str, None]], layout: _layout=strided, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
  921. @overload
  922. def full(size: Sequence[Union[_int, SymInt]], fill_value: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  923. @overload
  924. def full(size: _size, fill_value: Number, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  925. def full_like(input: Tensor, fill_value: Number, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  926. def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool=False, symmetric_quant: _bool=False) -> Tensor: ...
  927. @overload
  928. def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
  929. @overload
  930. def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
  931. def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  932. def gcd_(input: Tensor, other: Tensor) -> Tensor: ...
  933. @overload
  934. def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  935. @overload
  936. def ge(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  937. def geqrf(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.geqrf: ...
  938. def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  939. def get_default_dtype() -> _dtype: ...
  940. def get_num_interop_threads() -> _int: ...
  941. def get_num_threads() -> _int: ...
  942. @overload
  943. def gradient(input: Tensor, *, spacing: Optional[Number]=None, dim: Optional[_int]=None, edge_order: _int=1) -> List[Tensor]: ...
  944. @overload
  945. def gradient(input: Tensor, *, spacing: Sequence[Number], dim: Optional[_int]=None, edge_order: _int=1) -> List[Tensor]: ...
  946. @overload
  947. def gradient(input: Tensor, *, spacing: Number, dim: _size, edge_order: _int=1) -> List[Tensor]: ...
  948. @overload
  949. def gradient(input: Tensor, *, spacing: Sequence[Number], dim: _size, edge_order: _int=1) -> List[Tensor]: ...
  950. @overload
  951. def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int]=None, edge_order: _int=1) -> List[Tensor]: ...
  952. @overload
  953. def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int=1) -> List[Tensor]: ...
  954. @overload
  955. def gradient(input: Tensor, *, dim: _size, edge_order: _int=1) -> List[Tensor]: ...
# `greater`/`greater_equal` — aliases of gt/ge; each has Tensor-vs-Tensor and
# Tensor-vs-scalar overloads.  grid_sampler* and group_norm are the functional
# forms used by torch.nn.
@overload
def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def greater(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def greater_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: _float=1e-05, cudnn_enabled: _bool=True) -> Tensor: ...
# gru — first overload takes packed-sequence data (data + batch_sizes), the
# second a padded batch with `batch_first`.  gru_cell is the single-step cell.
@overload
def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
# gt — element-wise greater-than; Tensor and scalar right-hand overloads.
@overload
def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def gt(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
# Window functions (hamming/hann): overloads progressively expose periodic,
# alpha, and beta positionals; the keyword-only tail mirrors the TensorOptions
# factory arguments.
@overload
def hamming_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def hann_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def hardshrink(input: Tensor, lambd: Number=0.5, *, out: Optional[Tensor]=None) -> Tensor: ...
def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
# NOTE: hinge_embedding_loss uses plain float/bool (generated by a different
# codegen path than the _float/_bool aliases used elsewhere in this file).
def hinge_embedding_loss(input: Tensor, target: Tensor, margin: float = ..., size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
def histc(input: Tensor, bins: _int=100, min: Number=0, max: Number=0, *, out: Optional[Tensor]=None) -> Tensor: ...
  993. @overload
  994. def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor]=None, density: _bool=False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.histogram: ...
  995. @overload
  996. def histogram(input: Tensor, bins: _int=100, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.histogram: ...
  997. @overload
  998. def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogramdd: ...
  999. @overload
  1000. def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogramdd: ...
  1001. @overload
  1002. def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogramdd: ...
# NOTE(review): `hsmm` is not documented public torch API; this entry may be a
# garbled form of `hspmm` (declared just below) or `smm` — verify against the
# native_functions.yaml this stub was generated from before relying on it.
def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
# hsplit — split horizontally into equal sections or at explicit indices.
@overload
def hsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
@overload
def hsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
# i0 / i0_ — modified Bessel function of the first kind, out-of-place and
# in-place variants; igamma/igammac — regularized incomplete gamma functions.
def i0(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def i0_(input: Tensor) -> Tensor: ...
def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def imag(input: Tensor) -> Tensor: ...
# index_* family — each op pairs an integer-dim overload with a named-dim
# (str/ellipsis) overload; only the integer-dim form takes `out`.
@overload
def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Number=1) -> Tensor: ...
@overload
def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ...
# index_fill — four overloads: {int dim, named dim} x {Tensor value, scalar}.
@overload
def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: _int, index: Tensor, value: Number) -> Tensor: ...
@overload
def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ...
def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ...
def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool=True, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
# Runtime setup, linear-algebra helpers, and the is_* tensor/global predicates
# (each returns a plain Python bool, not a Tensor).
def init_num_threads() -> None: ...
def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
def int_repr(input: Tensor) -> Tensor: ...
def inverse(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def is_complex(input: Tensor) -> _bool: ...
def is_conj(input: Tensor) -> _bool: ...
def is_distributed(input: Tensor) -> _bool: ...
def is_floating_point(input: Tensor) -> _bool: ...
def is_grad_enabled() -> _bool: ...
def is_inference(input: Tensor) -> _bool: ...
def is_inference_mode_enabled() -> _bool: ...
def is_neg(input: Tensor) -> _bool: ...
def is_nonzero(input: Tensor) -> _bool: ...
def is_same_size(input: Tensor, other: Tensor) -> _bool: ...
def is_signed(input: Tensor) -> _bool: ...
def is_vulkan_available() -> _bool: ...
# isclose/isfinite return boolean Tensors (element-wise), unlike is_* above.
def isclose(input: Tensor, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> Tensor: ...
def isfinite(input: Tensor) -> Tensor: ...
# isin — membership test; overloads cover Tensor/Tensor, scalar/Tensor, and
# Tensor/scalar operand combinations.
@overload
def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool=False, invert: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def isin(element: Number, test_elements: Tensor, *, assume_unique: _bool=False, invert: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def isin(elements: Tensor, test_element: Number, *, assume_unique: _bool=False, invert: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
def isinf(input: Tensor) -> Tensor: ...
def isnan(input: Tensor) -> Tensor: ...
def isneginf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def isposinf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def isreal(input: Tensor) -> Tensor: ...
# istft — inverse short-time Fourier transform.
def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int]=None, win_length: Optional[_int]=None, window: Optional[Tensor]=None, center: _bool=True, normalized: _bool=False, onesided: Optional[_bool]=None, length: Optional[_int]=None, return_complex: _bool=False) -> Tensor: ...
# kaiser_window — overloads progressively expose periodic and beta; keyword
# tail mirrors the TensorOptions factory arguments.
@overload
def kaiser_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
# kl_div uses plain bool/`...` defaults (hand-maintained loss signature).
def kl_div(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ..., log_target: bool = ...) -> Tensor: ...
def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1079. @overload
  1080. def kthvalue(input: Tensor, k: _int, dim: _int=-1, keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.kthvalue: ...
  1081. @overload
  1082. def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.kthvalue: ...
  1083. def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: _float=1e-05, cudnn_enable: _bool=True) -> Tensor: ...
  1084. def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1085. def lcm_(input: Tensor, other: Tensor) -> Tensor: ...
  1086. def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1087. def ldexp_(input: Tensor, other: Tensor) -> Tensor: ...
# Element-wise comparisons le/less/less_equal and lerp — each with Tensor and
# scalar right-hand overloads — plus lgamma and the linspace factory.
@overload
def le(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def le(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def lerp(input: Tensor, end: Tensor, weight: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def less(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def less(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def less_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def lgamma(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
# linspace — legacy overload with optional steps and compact Device alias,
# plus the full TensorOptions overload with required steps.
@overload
def linspace(start: Number, end: Number, steps: Optional[_int]=None, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def linspace(start: Number, end: Number, steps: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
# Logarithm family (out-of-place and trailing-underscore in-place variants),
# log_softmax and log-domain arithmetic/logical helpers.
def log(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def log10(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def log10_(input: Tensor) -> Tensor: ...
def log1p(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def log1p_(input: Tensor) -> Tensor: ...
def log2(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def log2_(input: Tensor) -> Tensor: ...
def log_(input: Tensor) -> Tensor: ...
# log_softmax — integer-dim overload (takes `out`) and named-dim overload.
@overload
def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
def logdet(input: Tensor) -> Tensor: ...
def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logical_not(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def logit(input: Tensor, eps: Optional[_float]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
def logit_(input: Tensor, eps: Optional[_float]=None) -> Tensor: ...
# logspace factory (legacy and TensorOptions overloads), logsumexp reduction,
# lstm RNN entry points (packed vs padded input), and the `lt` comparison.
@overload
def logspace(start: Number, end: Number, steps: Optional[_int]=None, base: _float=10.0, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def logspace(start: Number, end: Number, steps: _int, base: _float=10.0, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]: ...
@overload
def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def lt(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  1151. def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1152. def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool=True, unpack_pivots: _bool=True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.lu_unpack: ...
  1153. def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float = ..., size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
# masked_* ops (fill accepts a Tensor or scalar fill value) and basic matrix
# products/functions.
@overload
def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ...
@overload
def masked_fill(input: Tensor, mask: Tensor, value: Number) -> Tensor: ...
def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ...
def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def matrix_exp(input: Tensor) -> Tensor: ...
def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
  1163. @overload
  1164. def max(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1165. @overload
  1166. def max(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1167. @overload
  1168. def max(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.max: ...
  1169. @overload
  1170. def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.max: ...
  1171. def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
  1172. def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tuple[Tensor, Tensor]: ...
  1173. def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
  1174. def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
  1175. def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1176. @overload
  1177. def mean(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor: ...
  1178. @overload
  1179. def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  1180. @overload
  1181. def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  1182. @overload
  1183. def median(input: Tensor) -> Tensor: ...
  1184. @overload
  1185. def median(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.median: ...
  1186. @overload
  1187. def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.median: ...
  1188. @overload
  1189. def min(input: Tensor) -> Tensor: ...
  1190. @overload
  1191. def min(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1192. @overload
  1193. def min(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.min: ...
  1194. @overload
  1195. def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.min: ...
  1196. def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
# Backend-specific kernels: MIOpen (ROCm) and MKL-DNN/oneDNN entry points,
# plus the dense matrix product `mm`.
def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Number], bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, groups: _int) -> Tensor: ...
def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, groups: _int) -> Tensor: ...
def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor]=None) -> Tensor: ...
def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: _size, dilation: _size, groups: _int) -> Tensor: ...
def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ...
def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1211. @overload
  1212. def mode(input: Tensor, dim: _int=-1, keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.mode: ...
  1213. @overload
  1214. def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.mode: ...
  1215. @overload
  1216. def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor: ...
  1217. @overload
  1218. def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor: ...
  1219. @overload
  1220. def movedim(input: Tensor, source: _int, destination: _int) -> Tensor: ...
  1221. @overload
  1222. def movedim(input: Tensor, source: _size, destination: _size) -> Tensor: ...
  1223. def msort(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
# Multiplication family, multinomial sampling, and NaN replacement helpers.
# NOTE: only the second `multiply` overload accepts a scalar `other`, and it
# does not take `out` (mirrors the native schema).
def mul(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
def multinomial(input: Tensor, num_samples: _int, replacement: _bool=False, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def multiply(input: Tensor, other: Number) -> Tensor: ...
def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def nan_to_num(input: Tensor, nan: Optional[_float]=None, posinf: Optional[_float]=None, neginf: Optional[_float]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
def nan_to_num_(input: Tensor, nan: Optional[_float]=None, posinf: Optional[_float]=None, neginf: Optional[_float]=None) -> Tensor: ...
  1234. def nanmean(input: Tensor, dim: Optional[Union[_int, _size]]=None, keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  1235. @overload
  1236. def nanmedian(input: Tensor) -> Tensor: ...
  1237. @overload
  1238. def nanmedian(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.nanmedian: ...
  1239. @overload
  1240. def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.nanmedian: ...
  1241. @overload
  1242. def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear", out: Optional[Tensor]=None) -> Tensor: ...
  1243. @overload
  1244. def nanquantile(input: Tensor, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear", out: Optional[Tensor]=None) -> Tensor: ...
  1245. def nansum(input: Tensor, dim: Optional[Union[_int, _size]]=None, keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  1246. @overload
  1247. def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: ...
  1248. @overload
  1249. def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ...
  1250. def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor]=None) -> Tensor: ...
  1251. def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
  1252. def native_channel_shuffle(input: Tensor, groups: _int) -> Tensor: ...
  1253. def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ...
  1254. def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
  1255. def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
# native_norm (raw ATen norm), the `ne` comparison, negation variants, and
# nextafter.
@overload
def native_norm(input: Tensor, p: Optional[Number], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ...
@overload
def native_norm(input: Tensor, p: Number=2) -> Tensor: ...
@overload
def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def ne(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def neg(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def neg_(input: Tensor) -> Tensor: ...
def negative(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def negative_(input: Tensor) -> Tensor: ...
def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
# nonzero — Literal overloads let type checkers pick Tensor vs tuple-of-Tensor
# based on the as_tuple flag.  normal — overloads cover Tensor/float mean and
# std combinations plus an explicit-size factory form.
@overload
def nonzero(input: Tensor, *, as_tuple: Literal[False]=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ...
def norm_except_dim(v: Tensor, pow: _int=2, dim: _int=0) -> Tensor: ...
@overload
def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def normal(mean: Tensor, std: _float=1, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def normal(mean: _float, std: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
# not_equal (alias of ne), nuclear_norm, and numel.  NOTE: numel's first
# parameter is named `self` in the native schema, unlike the `input` used by
# the surrounding functions.
@overload
def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def not_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def nuclear_norm(input: Tensor, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
def numel(self: Tensor) -> _int: ...
# ones — four overloads: size-sequence vs varargs ints, each with either a
# `names` keyword (named-tensor factory) or an `out` keyword; plus ones_like.
@overload
def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def ones_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
  1300. def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1301. def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool=True, transpose: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
  1302. def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1303. def pairwise_distance(x1: Tensor, x2: Tensor, p: _float=2, eps: _float=1e-06, keepdim: _bool=False) -> Tensor: ...
  1304. def pdist(input: Tensor, p: _float=2) -> Tensor: ...
  1305. def permute(input: Tensor, dims: _size) -> Tensor: ...
  1306. def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor]=None) -> Tensor: ...
  1307. def pinverse(input: Tensor, rcond: _float=1e-15) -> Tensor: ...
  1308. def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ...
  1309. def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ...
  1310. def poisson(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
  1311. def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ...
  1312. def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1313. def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1314. def positive(input: Tensor) -> Tensor: ...
  1315. @overload
  1316. def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1317. @overload
  1318. def pow(self: Number, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
  1319. @overload
  1320. def pow(input: Tensor, exponent: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
  1321. def prelu(input: Tensor, weight: Tensor) -> Tensor: ...
  1322. @overload
  1323. def prod(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor: ...
  1324. @overload
  1325. def prod(input: Tensor, dim: _int, keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  1326. @overload
  1327. def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  1328. def promote_types(type1: _dtype, type2: _dtype) -> _dtype: ...
  1329. def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool=False) -> Tensor: ...
  1330. def q_per_channel_axis(input: Tensor) -> _int: ...
  1331. def q_per_channel_scales(input: Tensor) -> Tensor: ...
  1332. def q_per_channel_zero_points(input: Tensor) -> Tensor: ...
  1333. def q_scale(input: Tensor) -> _float: ...
  1334. def q_zero_point(input: Tensor) -> _int: ...
  1335. def qr(input: Tensor, some: _bool=True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.qr: ...
# quantile: q may be a Tensor of probabilities or a single float.
@overload
def quantile(input: Tensor, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear", out: Optional[Tensor]=None) -> Tensor: ...
@overload
def quantile(input: Tensor, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear", out: Optional[Tensor]=None) -> Tensor: ...
def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor: ...
# quantize_per_tensor: scalar or Tensor scale/zero_point, plus a list form.
@overload
def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor: ...
@overload
def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor: ...
@overload
def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> List[Tensor]: ...
def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor: ...
def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor: ...
def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tuple[Tensor, Tensor]: ...
def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
def rad2deg(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def rad2deg_(input: Tensor) -> Tensor: ...
# rand: overload matrix = {sequence size, varargs size} x
# {generator+names, generator+out, names, out}.
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def rand(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def rand_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
# randint: `high`-only and `low, high` forms, with and without generator/out.
# NOTE(review): `device: Device=None` relies on the `Device` alias admitting
# None — verify the alias definition at the top of the file.
@overload
def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def randint(high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def randint(high: _int, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randint(high: _int, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randint(low: _int, high: _int, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randint(low: _int, high: _int, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randint_like(input: Tensor, high: _int, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randint_like(input: Tensor, low: _int, high: _int, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
# randn: same overload matrix as rand above.
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def randn_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randperm(n: _int, *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def randperm(n: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def range(start: Number, end: Number, step: Number=1, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
def ravel(input: Tensor) -> Tensor: ...
def real(input: Tensor) -> Tensor: ...
# Trailing-underscore variants below are the in-place forms of their
# out-of-place counterparts.
def reciprocal(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def reciprocal_(input: Tensor) -> Tensor: ...
def relu(input: Tensor) -> Tensor: ...
def relu_(input: Tensor) -> Tensor: ...
@overload
def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def remainder(self: Number, other: Tensor) -> Tensor: ...
@overload
def remainder(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def renorm(input: Tensor, p: Number, dim: _int, maxnorm: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
# repeat_interleave: Tensor repeats (with or without `input`) or int repeats.
@overload
def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int]=None, *, output_size: Optional[_int]=None) -> Tensor: ...
@overload
def repeat_interleave(repeats: Tensor, *, output_size: Optional[_int]=None) -> Tensor: ...
@overload
def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int]=None, *, output_size: Optional[_int]=None) -> Tensor: ...
def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor: ...
def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ...
def resolve_conj(input: Tensor) -> Tensor: ...
def resolve_neg(input: Tensor) -> Tensor: ...
# result_type: all four Tensor/Number operand combinations.
@overload
def result_type(tensor: Tensor, other: Tensor) -> _dtype: ...
@overload
def result_type(scalar: Number, tensor: Tensor) -> _dtype: ...
@overload
def result_type(tensor: Tensor, other: Number) -> _dtype: ...
@overload
def result_type(scalar1: Number, scalar2: Number) -> _dtype: ...
# rnn_relu / rnn_tanh: packed-sequence form (data + batch_sizes) vs padded
# form (input + batch_first).
@overload
def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
@overload
def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
def roll(input: Tensor, shifts: Union[_int, _size], dims: Union[_int, _size]=()) -> Tensor: ...
def rot90(input: Tensor, k: _int=1, dims: _size=(0,1)) -> Tensor: ...
@overload
def round(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def round(input: Tensor, *, decimals: _int, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def round_(input: Tensor) -> Tensor: ...
@overload
def round_(input: Tensor, *, decimals: _int) -> Tensor: ...
def row_indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
def rrelu(input: Tensor, lower: Number=0.125, upper: Number=0.3333333333333333, training: _bool=False, generator: Optional[Generator]=None) -> Tensor: ...
def rrelu_(input: Tensor, lower: Number=0.125, upper: Number=0.3333333333333333, training: _bool=False, generator: Optional[Generator]=None) -> Tensor: ...
def rsqrt(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def rsqrt_(input: Tensor) -> Tensor: ...
@overload
def rsub(input: Tensor, other: Tensor, *, alpha: Number=1) -> Tensor: ...
@overload
def rsub(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
def scalar_tensor(s: Number, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
# scatter: src-Tensor vs scalar value, with optional `reduce`, plus named-dim
# variants (no out/reduce).
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, value: Number, *, reduce: str, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, value: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
@overload
def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool=True, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool=False, right: _bool=False, side: Optional[str]=None, sorter: Optional[Tensor]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def searchsorted(sorted_sequence: Tensor, self: Number, *, out_int32: _bool=False, right: _bool=False, side: Optional[str]=None, sorter: Optional[Tensor]=None) -> Tensor: ...
def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor]=None, indices: Optional[Tensor]=None, offsets: Optional[Tensor]=None, axis: _int=0, unsafe: _bool=False, initial: Optional[Number]=None) -> Tensor: ...
@overload
def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ...
@overload
def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor: ...
def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor]=None) -> Tensor: ...
def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ...
def selu(input: Tensor) -> Tensor: ...
def selu_(input: Tensor) -> Tensor: ...
# Global runtime configuration setters (not tensor ops).
def set_flush_denormal(mode: _bool) -> _bool: ...
def set_num_interop_threads(num: _int) -> None: ...
def set_num_threads(num: _int) -> None: ...
def sgn(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sigmoid(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sigmoid_(input: Tensor) -> Tensor: ...
def sign(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def signbit(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sin_(input: Tensor) -> Tensor: ...
def sinc(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sinc_(input: Tensor) -> Tensor: ...
def sinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sinh_(input: Tensor) -> Tensor: ...
def slice_copy(input: Tensor, dim: _int=0, start: Optional[Union[_int, SymInt]]=None, end: Optional[Union[_int, SymInt]]=None, step: Union[_int, SymInt]=1, *, out: Optional[Tensor]=None) -> Tensor: ...
def slice_scatter(input: Tensor, src: Tensor, dim: _int=0, start: Optional[Union[_int, SymInt]]=None, end: Optional[Union[_int, SymInt]]=None, step: Union[_int, SymInt]=1) -> Tensor: ...
  1522. def slogdet(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.slogdet: ...
def smm(input: Tensor, mat2: Tensor) -> Tensor: ...
# softmax: int dim (with out) vs named dim (keyword-only dtype, no out).
@overload
def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
  1528. @overload
  1529. def sort(input: Tensor, *, stable: Optional[_bool], dim: _int=-1, descending: _bool=False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.sort: ...
  1530. @overload
  1531. def sort(input: Tensor, dim: _int=-1, descending: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.sort: ...
  1532. @overload
  1533. def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool=False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.sort: ...
  1534. @overload
  1535. def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.sort: ...
  1536. def sparse_bsc_tensor(ccol_indices: Union[Tensor, List],row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad:_bool=False, check_invariants:_bool=None) -> Tensor: ...
  1537. def sparse_bsr_tensor(crow_indices: Union[Tensor, List],col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad:_bool=False, check_invariants:_bool=None) -> Tensor: ...
  1538. def sparse_compressed_tensor(compressed_indices: Union[Tensor, List],plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, layout: Optional[_layout] = None, device: Union[_device, str, None]=None, requires_grad:_bool=False, check_invariants:_bool=None) -> Tensor: ...
  1539. def sparse_coo_tensor(indices: Tensor, values: Union[Tensor,List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad:_bool=False, check_invariants:_bool=None) -> Tensor: ...
  1540. def sparse_csc_tensor(ccol_indices: Union[Tensor, List],row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad:_bool=False, check_invariants:_bool=None) -> Tensor: ...
  1541. def sparse_csr_tensor(crow_indices: Union[Tensor, List],col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad:_bool=False, check_invariants:_bool=None) -> Tensor: ...
  1542. def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int=0, *, out: Union[Tuple[Tensor, ...], List[Tensor]]=None) -> None: ...
  1543. def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
  1544. def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int=0, *, out: Union[Tuple[Tensor, ...], List[Tensor]]=None) -> None: ...
def spmm(input: Tensor, mat2: Tensor) -> Tensor: ...
def sqrt(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def sqrt_(input: Tensor) -> Tensor: ...
def square(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def square_(input: Tensor) -> Tensor: ...
# squeeze: no dim, int dim, multi-dim, or named dim.
@overload
def squeeze(input: Tensor) -> Tensor: ...
@overload
def squeeze(input: Tensor, dim: _int) -> Tensor: ...
@overload
def squeeze(input: Tensor, dim: _size) -> Tensor: ...
@overload
def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor: ...
@overload
def squeeze_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor]=None) -> Tensor: ...
# sspaddmm: the first two overloads are the legacy positional beta/alpha
# forms; the last is the current keyword-only form.
@overload
def sspaddmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def sspaddmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
# std / std_mean: legacy `unbiased` overloads alongside the newer
# `correction` keyword forms, for both int and named dims.
@overload
def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def std(input: Tensor, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def std(input: Tensor, unbiased: _bool=True) -> Tensor: ...
@overload
def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def std_mean(input: Tensor, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def std_mean(input: Tensor, unbiased: _bool=True) -> Tuple[Tensor, Tensor]: ...
@overload
def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
# sub: second/third overloads are the legacy positional-alpha forms.
@overload
def sub(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def sub(self: Tensor, alpha: Number, other: Tensor) -> Tensor: ...
@overload
def sub(self: Tensor, alpha: Number, other: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def subtract(input: Tensor, other: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def subtract(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
# sum: full reduction, int/size dims, or named dims.
@overload
def sum(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
  1607. def svd(input: Tensor, some: _bool=True, compute_uv: _bool=True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.svd: ...
# --- torch.swapaxes ... torch.tile ---
# Auto-generated stub signatures (type declarations only; bodies are `...`).
def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor: ...
def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def t(input: Tensor) -> Tensor: ...
def t_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def take(input: Tensor, index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
def tan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def tan_(input: Tensor) -> Tensor: ...
def tanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def tanh_(input: Tensor) -> Tensor: ...
def tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
# tensor_split: three overloads — indices-as-Tensor, single section count, or
# an explicit sequence of split indices.
@overload
def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int=0) -> List[Tensor]: ...
@overload
def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int=0) -> List[Tensor]: ...
@overload
def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
def threshold(input: Tensor, threshold: Number, value: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
def threshold_(input: Tensor, threshold: Number, value: Number) -> Tensor: ...
def tile(input: Tensor, dims: _size) -> Tensor: ...
  1628. def topk(input: Tensor, k: _int, dim: _int=-1, largest: _bool=True, sorted: _bool=True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.topk: ...
# --- torch.trace ... torch.trapz ---
# Auto-generated stub signatures. transpose has int-dim and named-dim
# overloads; trapezoid/trapz each offer sample-points (`x`) or spacing (`dx`)
# variants.
def trace(input: Tensor) -> Tensor: ...
@overload
def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
@overload
def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: ...
def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def trapezoid(y: Tensor, x: Tensor, *, dim: _int=-1) -> Tensor: ...
@overload
def trapezoid(y: Tensor, *, dx: Number=1, dim: _int=-1) -> Tensor: ...
@overload
def trapz(y: Tensor, *, dx: _float=1, dim: _int=-1) -> Tensor: ...
@overload
def trapz(y: Tensor, x: Tensor, *, dim: _int=-1) -> Tensor: ...
  1643. def triangular_solve(input: Tensor, A: Tensor, upper: _bool=True, transpose: _bool=False, unitriangular: _bool=False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor]]=None) -> torch.return_types.triangular_solve: ...
# --- torch.tril ... torch.unbind ---
# Auto-generated stub signatures. NOTE(review): triplet_margin_loss uses bare
# `float`/`bool`/`str` and `= ...` defaults instead of the `_float`/`_bool`
# aliases used elsewhere — presumably emitted by a different generator path.
def tril(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def tril_indices(row: _int, col: _int, offset: _int=0, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: float = ..., p: float = ..., eps: float = ..., swap: bool = ..., size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
def triu(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def triu_indices(row: _int, col: _int, offset: _int=0, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
def trunc(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def trunc_(input: Tensor) -> Tensor: ...
@overload
def unbind(input: Tensor, dim: _int=0) -> List[Tensor]: ...
@overload
def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> List[Tensor]: ...
  1656. def unbind_copy(input: Tensor, dim: _int=0, *, out: Union[Tuple[Tensor, ...], List[Tensor]]=None) -> None: ...
# --- torch.unflatten ... torch.zeros_like ---
# Auto-generated stub signatures (type declarations only; bodies are `...`).
@overload
def unflatten(input: Tensor, dim: _int, sizes: _size) -> Tensor: ...
@overload
def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: _size, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def unique_dim(input: Tensor, dim: _int, sorted: _bool=True, return_inverse: _bool=False, return_counts: _bool=False) -> Tuple[Tensor, Tensor, Tensor]: ...
def unsafe_chunk(input: Tensor, chunks: _int, dim: _int=0) -> List[Tensor]: ...
def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int=0) -> List[Tensor]: ...
def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
def unsqueeze(input: Tensor, dim: _int) -> Tensor: ...
def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def values_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def vander(x: Tensor, N: Optional[_int]=None, increasing: _bool=False) -> Tensor: ...
# var / var_mean mirror the std / std_mean overload structure above:
# int-dim vs. named-dim, `unbiased:` legacy form vs. `correction:` keyword.
@overload
def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def var(input: Tensor, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def var(input: Tensor, unbiased: _bool=True) -> Tensor: ...
@overload
def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def var_mean(input: Tensor, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def var_mean(input: Tensor, unbiased: _bool=True) -> Tuple[Tensor, Tensor]: ...
@overload
def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def view_as_complex(input: Tensor) -> Tensor: ...
def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def view_as_real(input: Tensor) -> Tensor: ...
def view_as_real_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def vsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
@overload
def vsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
# where: the condition-only overload returns nonzero index tensors; the other
# overloads select between tensor/scalar operands elementwise.
@overload
def where(condition: Tensor) -> List[Tensor]: ...
@overload
def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def where(condition: Tensor, self: Number, other: Tensor) -> Tensor: ...
@overload
def where(condition: Tensor, input: Tensor, other: Number) -> Tensor: ...
@overload
def where(condition: Tensor, self: Number, other: Number) -> Tensor: ...
@overload
def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def xlogy(self: Number, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def xlogy(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def xlogy_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def xlogy_(input: Tensor, other: Number) -> Tensor: ...
def zero_(input: Tensor) -> Tensor: ...
# zeros: named-tensor overloads (with `names`) plus positional-size and
# *size variadic forms.
@overload
def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def zeros_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
# Public-API export list for this stub module.
# NOTE(review): several names appear more than once (e.g.
# '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams',
# '_fused_moving_avg_obs_fq_helper', 'svd', 'topk', 'cummax', 'histogram') —
# presumably one entry per emitted overload/return-type from the stub
# generator. Duplicates in __all__ are harmless at runtime but should be
# deduplicated upstream in the generator rather than hand-edited here.
__all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d',
'_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation',
'_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async',
'_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char', '_cast_Double',
'_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short',
'_choose_qparams_per_tensor', '_chunk_grad_outputs_efficient_attention', '_coalesce',
'_compute_linear_combination', '_conj', '_conj_copy', '_conj_physical',
'_convert_indices_from_coo_to_csr', '_convert_indices_from_csr_to_coo', '_convolution',
'_convolution_mode', '_copy_from', '_copy_from_and_resize', '_ctc_loss', '_cudnn_ctc_loss',
'_cudnn_init_dropout_state', '_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache',
'_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size',
'_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange',
'_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag',
'_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized',
'_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine',
'_fake_quantize_learnable_per_tensor_affine',
'_fake_quantize_per_tensor_affine_cachemask_tensor_qparams',
'_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c',
'_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos', '_foreach_acos_', '_foreach_add',
'_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_', '_foreach_addcmul', '_foreach_addcmul_',
'_foreach_asin', '_foreach_asin_', '_foreach_atan', '_foreach_atan_', '_foreach_ceil',
'_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_', '_foreach_clamp_min',
'_foreach_clamp_min_', '_foreach_cos', '_foreach_cos_', '_foreach_cosh', '_foreach_cosh_',
'_foreach_div', '_foreach_div_', '_foreach_erf', '_foreach_erf_', '_foreach_erfc',
'_foreach_erfc_', '_foreach_exp', '_foreach_exp_', '_foreach_expm1', '_foreach_expm1_',
'_foreach_floor', '_foreach_floor_', '_foreach_frac', '_foreach_frac_', '_foreach_lerp',
'_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_', '_foreach_log', '_foreach_log10',
'_foreach_log10_', '_foreach_log1p', '_foreach_log1p_', '_foreach_log2', '_foreach_log2_',
'_foreach_log_', '_foreach_maximum', '_foreach_maximum_', '_foreach_minimum', '_foreach_minimum_',
'_foreach_mul', '_foreach_mul_', '_foreach_neg', '_foreach_neg_', '_foreach_norm',
'_foreach_reciprocal', '_foreach_reciprocal_', '_foreach_round', '_foreach_round_',
'_foreach_sigmoid', '_foreach_sigmoid_', '_foreach_sin', '_foreach_sin_', '_foreach_sinh',
'_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_', '_foreach_sub', '_foreach_sub_',
'_foreach_tan', '_foreach_tan_', '_foreach_tanh', '_foreach_tanh_', '_foreach_trunc',
'_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor', '_fused_adam_', '_fused_adamw_',
'_fused_dropout', '_fused_moving_avg_obs_fq_helper', '_fused_moving_avg_obs_fq_helper',
'_fused_sdp_choice', '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback',
'_has_compatible_shallow_copy_type', '_histogramdd_bin_edges', '_histogramdd_from_bin_cts',
'_histogramdd_from_bin_tensors', '_index_put_impl_', '_indices_copy', '_is_all_true',
'_is_any_true', '_is_functional_tensor', '_is_zerotensor', '_linalg_check_errors', '_linalg_det',
'_linalg_det', '_linalg_eigh', '_linalg_eigh', '_linalg_slogdet', '_linalg_slogdet',
'_linalg_solve_ex', '_linalg_solve_ex', '_linalg_svd', '_linalg_svd', '_log_softmax',
'_log_softmax_backward_data', '_logcumsumexp', '_lstm_mps', '_lu_with_info', '_lu_with_info',
'_make_dual', '_make_dual_copy', '_make_per_channel_quantized_tensor',
'_make_per_tensor_quantized_tensor', '_masked_scale', '_masked_softmax', '_mkldnn_reshape',
'_mkldnn_transpose', '_mkldnn_transpose_', '_mps_convolution', '_mps_convolution_transpose',
'_native_batch_norm_legit', '_native_decoder_only_multi_head_attention',
'_native_multi_head_attention', '_neg_view', '_neg_view_copy', '_nested_from_padded',
'_nested_from_padded_and_nested_example', '_nested_tensor_from_mask',
'_nested_tensor_from_mask_left_aligned', '_nested_tensor_from_tensor_list',
'_nested_tensor_softmax_with_shape', '_nnpack_available', '_nnpack_spatial_convolution',
'_pack_padded_sequence', '_pad_packed_sequence', '_pin_memory', '_prelu_kernel',
'_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor', '_resize_output_',
'_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16',
'_scaled_dot_product_attention_math', '_scaled_dot_product_efficient_attention',
'_scaled_dot_product_flash_attention', '_scaled_dot_product_flash_attention', '_shape_as_tensor',
'_sobol_engine_draw', '_sobol_engine_ff_', '_sobol_engine_initialize_state_',
'_sobol_engine_scramble_', '_softmax', '_softmax_backward_data', '_sparse_broadcast_to',
'_sparse_broadcast_to_copy', '_sparse_csr_prod', '_sparse_csr_sum',
'_sparse_log_softmax_backward_data', '_sparse_softmax_backward_data', '_sparse_sparse_matmul',
'_sparse_sum', '_stack', '_standard_gamma', '_standard_gamma_grad', '_sync',
'_test_autograd_multiple_dispatch', '_test_autograd_multiple_dispatch_view',
'_test_autograd_multiple_dispatch_view_copy', '_test_check_tensor', '_test_serialization_subcmul',
'_to_cpu', '_to_functional_tensor', '_transform_bias_rescale_qkv',
'_transformer_decoder_only_layer_fwd', '_transformer_encoder_layer_fwd', '_trilinear',
'_triton_multi_head_attention', '_triton_scaled_dot_attention', '_unique', '_unique2',
'_unpack_dual', '_unpack_dual', '_use_cudnn_ctc_loss', '_use_cudnn_rnn_flatten_weight',
'_validate_compressed_sparse_indices', '_validate_sparse_bsc_tensor_args',
'_validate_sparse_bsr_tensor_args', '_validate_sparse_compressed_tensor_args',
'_validate_sparse_coo_tensor_args', '_validate_sparse_csc_tensor_args',
'_validate_sparse_csr_tensor_args', '_values_copy', '_weight_norm', '_weight_norm_interface',
'abs', 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', 'adaptive_avg_pool1d',
'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addmv_', 'addr',
'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose', 'alpha_dropout',
'alpha_dropout_', 'amax', 'amin', 'aminmax', 'aminmax', 'angle', 'any', 'arange', 'arccos',
'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan2',
'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere', 'as_strided',
'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray', 'asin', 'asin_',
'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', 'baddbmm',
'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce',
'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts',
'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', 'bilinear',
'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', 'bitwise_left_shift',
'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'blackman_window', 'bmm',
'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy', 'ceil', 'ceil_', 'celu',
'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse', 'cholesky_solve',
'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', 'clamp_max_', 'clamp_min',
'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy', 'column_stack', 'combinations',
'complex', 'concat', 'concatenate', 'conj', 'conj_physical', 'conj_physical_', 'constant_pad_nd',
'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d',
'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_', 'cosh', 'cosh_',
'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross', 'crow_indices_copy',
'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', 'cudnn_convolution',
'cudnn_convolution_add_relu', 'cudnn_convolution_relu', 'cudnn_convolution_transpose',
'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummax', 'cummin', 'cummin', 'cumprod',
'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_',
'detach_copy', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'diagonal_copy', 'diagonal_scatter',
'diff', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dropout_', 'dsmm', 'dsplit',
'dstack', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty', 'empty_like',
'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_', 'erfc', 'erfc_', 'erfinv', 'exp',
'exp2', 'exp2_', 'exp_', 'expand_copy', 'expm1', 'expm1_', 'eye',
'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight',
'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight',
'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight',
'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout',
'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_',
'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax',
'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy',
'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_',
'ge', 'geqrf', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads',
'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d',
'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside',
'hinge_embedding_loss', 'histc', 'histogram', 'histogram', 'histogramdd', 'histogramdd', 'hsmm',
'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add',
'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select',
'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex',
'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference',
'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed',
'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'kthvalue', 'layer_norm', 'lcm',
'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log',
'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp',
'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack',
'lu_unpack', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul',
'matrix_exp', 'matrix_power', 'max', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d',
'max_pool3d', 'maximum', 'mean', 'median', 'median', 'min', 'min', 'minimum', 'miopen_batch_norm',
'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu',
'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn',
'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights',
'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'mode', 'moveaxis',
'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num',
'nan_to_num_', 'nanmean', 'nanmedian', 'nanmedian', 'nanquantile', 'nansum', 'narrow',
'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout',
'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative',
'negative_', 'nextafter', 'nonzero', 'norm_except_dim', 'normal', 'not_equal', 'nuclear_norm',
'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance', 'pdist', 'permute',
'permute_copy', 'pinverse', 'pixel_shuffle', 'pixel_unshuffle', 'poisson', 'poisson_nll_loss',
'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types', 'put',
'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale',
'q_zero_point', 'qr', 'qr', 'quantile', 'quantize_per_channel', 'quantize_per_tensor',
'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell',
'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_rnn_relu_cell',
'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like',
'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu',
'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_',
'resolve_conj', 'resolve_neg', 'result_type', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh',
'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu',
'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add',
'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter',
'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn',
'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_',
'slice_copy', 'slice_scatter', 'slogdet', 'slogdet', 'smm', 'softmax', 'sort', 'sort',
'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor',
'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes',
'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy',
'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'svd', 'swapaxes',
'swapdims', 't', 't_copy', 'take', 'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor',
'tensor_split', 'threshold', 'threshold_', 'tile', 'topk', 'topk', 'trace', 'transpose',
'transpose_copy', 'trapezoid', 'trapz', 'triangular_solve', 'triangular_solve', 'tril',
'tril_indices', 'triplet_margin_loss', 'triu', 'triu_indices', 'true_divide', 'trunc', 'trunc_',
'unbind', 'unbind_copy', 'unflatten', 'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split',
'unsafe_split_with_sizes', 'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var',
'var_mean', 'vdot', 'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy',
'view_copy', 'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like']