- # @generated from torch/_C/_VariableFunctions.pyi.in
- from torch import Tensor, Generator, strided, memory_format, contiguous_format, inf
- from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload, Iterator, NamedTuple, Sequence, Literal, TypeVar
- from torch.types import _int, _float, _bool, Number, _dtype, _device, _qscheme, _size, _layout, SymInt, Device
- import torch
- import builtins
- @overload
- def __and__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __and__(input: Tensor, other: Number) -> Tensor: ...
- @overload
- def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __lshift__(input: Tensor, other: Number) -> Tensor: ...
- @overload
- def __or__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __or__(input: Tensor, other: Number) -> Tensor: ...
- @overload
- def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __rshift__(input: Tensor, other: Number) -> Tensor: ...
- @overload
- def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __xor__(input: Tensor, other: Number) -> Tensor: ...
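# Example (editorial note, not part of the generated stub): these dunder
# overloads back Python's bitwise operators on integer/bool tensors.
import torch
a = torch.tensor([0b1100, 0b1010])
b = torch.tensor([0b1010, 0b0110])
assert torch.equal(a & b, torch.tensor([0b1000, 0b0010]))   # __and__
assert torch.equal(a | b, torch.tensor([0b1110, 0b1110]))   # __or__
assert torch.equal(a ^ b, torch.tensor([0b0110, 0b1100]))   # __xor__
assert torch.equal(a << 1, a * 2)                           # __lshift__
assert torch.equal(a >> 2, a // 4)                          # __rshift__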
- def _adaptive_avg_pool2d(input: Tensor, output_size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- def _adaptive_avg_pool3d(input: Tensor, output_size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
- @overload
- def _add_relu(input: Tensor, other: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def _add_relu(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
- @overload
- def _add_relu_(input: Tensor, other: Tensor, *, alpha: Number=1) -> Tensor: ...
- @overload
- def _add_relu_(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
- def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, use_gelu: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
- @overload
- def _aminmax(input: Tensor, dim: _int, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
- def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ...
- def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
- def _assert_async(input: Tensor) -> None: ...
- def _assert_tensor_metadata(a: Tensor, size: Optional[_size]=None, stride: Optional[_size]=None, dtype: Optional[_dtype]=None) -> None: ...
- def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
- def _cast_Byte(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _cast_Char(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _cast_Double(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _cast_Float(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _cast_Half(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _cast_Int(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _cast_Long(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _cast_Short(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool=False) -> Tuple[_float, _int]: ...
- def _chunk_grad_outputs_efficient_attention(query: Tensor, key: Tensor, value: Tensor, is_causal: _bool=False) -> _bool: ...
- def _coalesce(input: Tensor) -> Tensor: ...
- def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _conj(input: Tensor) -> Tensor: ...
- def _conj_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _conj_physical(input: Tensor) -> Tensor: ...
- def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool=False, transpose: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size, groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
- @overload
- def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: Sequence[Union[_int, SymInt]], dilation: _size, transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
- def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: str, dilation: _size, groups: _int) -> Tensor: ...
- def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool=False) -> Tensor: ...
- def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ...
- @overload
- def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int=0, zero_infinity: _bool=False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int=0, zero_infinity: _bool=False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
- @overload
- def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
- def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
- def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
- def _cufft_clear_plan_cache(device_index: _int) -> None: ...
- def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
- def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
- def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
- def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
- def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
- def _debug_has_internal_overlap(input: Tensor) -> _int: ...
- def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
- def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
- def _disable_functionalization() -> None: ...
- @overload
- def _efficientzerotensor(size: _size, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def _efficientzerotensor(*size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False, padding_idx: _int=-1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False, padding_idx: _int=-1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- @overload
- def _empty_affine_quantized(size: _size, *, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def _empty_affine_quantized(*size: _int, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def _empty_per_channel_affine_quantized(size: _size, *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def _enable_functionalization(*, reapply_views: _bool=False) -> None: ...
- def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
- def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float=1.0) -> Tensor: ...
- def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float=1.0) -> Tensor: ...
- def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ...
- def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _foobar(input: Tensor, arg1: _bool=True, arg2: _bool=True, *, arg3: _bool=True) -> Tensor: ...
- def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
- @overload
- def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> List[Tensor]: ...
- @overload
- def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
- @overload
- def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
- @overload
- def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ...
- @overload
- def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> List[Tensor]: ...
- @overload
- def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
- @overload
- def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
- @overload
- def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ...
- @overload
- def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> List[Tensor]: ...
- @overload
- def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
- @overload
- def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
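# Sketch (editorial; these _foreach_* ops are internal and may change):
# an Adam-style fused update applied to a whole parameter list at once.
import torch
params   = [torch.zeros(3), torch.zeros(2)]
grads    = [torch.ones(3), torch.ones(2)]
exp_avgs = [torch.zeros_like(p) for p in params]
denoms   = [torch.ones_like(p) for p in params]
torch._foreach_mul_(exp_avgs, 0.9)                   # m <- beta1 * m
torch._foreach_add_(exp_avgs, grads, alpha=0.1)      # m <- m + (1 - beta1) * g
torch._foreach_addcdiv_(params, exp_avgs, denoms, value=-0.01)  # p <- p - lr * m / denom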
- def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
- @overload
- def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- @overload
- def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
- @overload
- def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
- @overload
- def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- @overload
- def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
- @overload
- def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
- @overload
- def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- @overload
- def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
- @overload
- def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Number) -> List[Tensor]: ...
- @overload
- def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- @overload
- def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Number) -> None: ...
- @overload
- def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
- @overload
- def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- @overload
- def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
- @overload
- def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
- @overload
- def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- @overload
- def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
- @overload
- def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
- @overload
- def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- @overload
- def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
- @overload
- def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Number=2) -> List[Tensor]: ...
- def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
- @overload
- def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
- @overload
- def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> List[Tensor]: ...
- @overload
- def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
- @overload
- def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
- @overload
- def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
- def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
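# Sketch (editorial; internal API): global gradient clipping built from the
# _foreach_ ops above instead of a per-tensor Python loop.
import torch
grads = [torch.randn(10), torch.randn(5)]
norms = torch._foreach_norm(grads, ord=2)            # per-tensor L2 norms
total = torch.linalg.vector_norm(torch.stack(norms))
scale = (1.0 / (total + 1e-6)).clamp(max=1.0)
torch._foreach_mul_(grads, scale.item())             # rescale every grad in place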
- def _from_functional_tensor(t: Tensor) -> Tensor: ...
- def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor]=None, found_inf: Optional[Tensor]=None) -> None: ...
- def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor]=None, found_inf: Optional[Tensor]=None) -> None: ...
- def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator]=None) -> Tuple[Tensor, Tensor]: ...
- def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool=False, symmetric_quant: _bool=False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ...
- def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor]=None, dropout_p: _float=0.0, is_causal: _bool=False) -> _int: ...
- def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
- def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
- def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> List[Tensor]: ...
- def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> Tensor: ...
- def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor]=None, density: _bool=False) -> Tensor: ...
- def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False, unsafe: _bool=False) -> Tensor: ...
- def _indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _is_all_true(input: Tensor) -> Tensor: ...
- def _is_any_true(input: Tensor) -> Tensor: ...
- def _is_functional_tensor(t: Tensor) -> _bool: ...
- def _is_zerotensor(input: Tensor) -> _bool: ...
- def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ...
- def _linalg_det(A: Tensor, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_det: ...
- def _linalg_eigh(A: Tensor, UPLO: str="L", compute_v: _bool=True, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_eigh: ...
- def _linalg_slogdet(A: Tensor, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_slogdet: ...
- def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool=True, check_errors: _bool=False, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_solve_ex: ...
- def _linalg_svd(A: Tensor, full_matrices: _bool=False, compute_uv: _bool=True, *, driver: Optional[str]=None, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_svd: ...
- def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ...
- def _lu_with_info(input: Tensor, pivot: _bool=True, check_errors: _bool=True) -> torch.return_types._lu_with_info: ...
- def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
- def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
- def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
- def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
- def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int]=None, mask_type: Optional[_int]=None) -> Tensor: ...
- def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
- def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
- def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
- def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, stride: _size, dilation: _size, groups: _int) -> Tensor: ...
- def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int) -> Tensor: ...
- @overload
- def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
- @overload
- def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
- def _native_decoder_only_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor]=None, incr_key: Optional[Tensor]=None, incr_value: Optional[Tensor]=None, need_weights: _bool=True, average_attn_weights: _bool=True) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor]=None, need_weights: _bool=True, average_attn_weights: _bool=True, mask_type: Optional[_int]=None) -> Tuple[Tensor, Tensor]: ...
- def _neg_view(input: Tensor) -> Tensor: ...
- def _neg_view_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool=False) -> Tensor: ...
- def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ...
- def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool=True) -> Tensor: ...
- def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ...
- def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=None) -> Tensor: ...
- def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ...
- def _nnpack_available() -> _bool: ...
- def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Union[_int, _size]=1) -> Tensor: ...
- def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
- def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Number, total_length: _int) -> Tuple[Tensor, Tensor]: ...
- def _pin_memory(input: Tensor, device: Optional[Union[_device, str, None]]=None) -> Tensor: ...
- def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ...
- def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ...
- def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None) -> Tensor: ...
- def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ...
- def _resize_output_(input: Tensor, size: _size, device: Union[_device, str, None]) -> Tensor: ...
- def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ...
- def _sample_dirichlet(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
- def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ...
- def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor]=None, dropout_p: _float=0.0, is_causal: _bool=False, dropout_mask: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]: ...
- def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, compute_log_sumexp: _bool, is_causal: _bool=False) -> Tuple[Tensor, Tensor]: ...
- def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float=0.0, is_causal: _bool=False, return_debug_mask: _bool=False) -> torch.return_types._scaled_dot_product_flash_attention: ...
- def _shape_as_tensor(input: Tensor) -> Tensor: ...
- def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ...
- def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ...
- def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ...
- def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ...
- def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Optional[Tensor]=None) -> Tensor: ...
- def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ...
- def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
- def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
- def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def _sparse_sum(input: Tensor) -> Tensor: ...
- @overload
- def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ...
- @overload
- def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ...
- @overload
- def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ...
- def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _standard_gamma(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
- def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ...
- def _sync(t: Tensor) -> None: ...
- @overload
- def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ...
- @overload
- def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ...
- def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ...
- def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _test_check_tensor(input: Tensor) -> Tensor: ...
- def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Number=1) -> Tensor: ...
- def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def _to_functional_tensor(t: Tensor) -> Tensor: ...
- def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ...
- def _transformer_decoder_only_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor]=None, incr_key: Optional[Tensor]=None, incr_value: Optional[Tensor]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
- def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor]=None, mask_type: Optional[_int]=None) -> Tensor: ...
- def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int=1) -> Tensor: ...
- def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor]=None) -> Tensor: ...
- def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float=0.0) -> Tensor: ...
- def _unique(input: Tensor, sorted: _bool=True, return_inverse: _bool=False) -> Tuple[Tensor, Tensor]: ...
- def _unique2(input: Tensor, sorted: _bool=True, return_inverse: _bool=False, return_counts: _bool=False) -> Tuple[Tensor, Tensor, Tensor]: ...
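# For orientation (editorial): these internals back the public torch.unique.
import torch
values, counts = torch.unique(torch.tensor([1, 3, 2, 3, 1]), return_counts=True)
# values -> tensor([1, 2, 3]); counts -> tensor([2, 1, 2])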
- def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ...
- @overload
- def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ...
- @overload
- def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ...
- def _use_cudnn_rnn_flatten_weight() -> _bool: ...
- def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ...
- def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ...
- def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _values_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def _weight_norm(v: Tensor, g: Tensor, dim: _int=0) -> Tensor: ...
- def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int=0) -> Tuple[Tensor, Tensor]: ...
- def abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def abs_(input: Tensor) -> Tensor: ...
- def absolute(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def acos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def acos_(input: Tensor) -> Tensor: ...
- def acosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def acosh_(input: Tensor) -> Tensor: ...
- def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
- def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
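# Example (editorial): adaptive pooling derives the kernel from the requested
# output size, so any input length maps to exactly output_size bins.
import torch
x = torch.arange(8.0).reshape(1, 1, 8)               # (N, C, L)
y = torch.adaptive_avg_pool1d(x, 4)
# y -> tensor([[[0.5, 2.5, 4.5, 6.5]]]); each bin averages two inputs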
- @overload
- def add(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def add(self: Tensor, alpha: Number, other: Tensor) -> Tensor: ...
- @overload
- def add(self: Tensor, alpha: Number, other: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor) -> Tensor: ...
- @overload
- def addbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
- @overload
- def addbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def addcdiv(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
- @overload
- def addcdiv(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def addcmul(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
- @overload
- def addcmul(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
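# Example (editorial): the keyword forms compute
#   addcmul(input, t1, t2, value=v) == input + v * t1 * t2   (elementwise)
#   addcdiv(input, t1, t2, value=v) == input + v * t1 / t2
# The positional-value overloads above are legacy spellings of the same ops.
import torch
out = torch.addcmul(torch.zeros(3), torch.tensor([1.0, 2.0, 3.0]),
                    torch.tensor([2.0, 2.0, 2.0]), value=0.5)
assert torch.equal(out, torch.tensor([1.0, 2.0, 3.0]))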
- @overload
- def addmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def addmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def addmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
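# Example (editorial): addmm fuses a matmul with a scaled add,
# computing beta * input + alpha * (mat1 @ mat2).
import torch
M = torch.zeros(2, 2)
out = torch.addmm(M, torch.eye(2), torch.full((2, 2), 3.0), beta=1, alpha=2)
assert torch.equal(out, torch.full((2, 2), 6.0))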
- @overload
- def addmv(beta: Number, self: Tensor, alpha: Number, mat: Tensor, vec: Tensor) -> Tensor: ...
- @overload
- def addmv(beta: Number, self: Tensor, alpha: Number, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addmv(beta: Number, self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
- @overload
- def addmv(beta: Number, self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def addmv_(beta: Number, self: Tensor, alpha: Number, mat: Tensor, vec: Tensor) -> Tensor: ...
- @overload
- def addmv_(beta: Number, self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
- @overload
- def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- @overload
- def addr(beta: Number, self: Tensor, alpha: Number, vec1: Tensor, vec2: Tensor) -> Tensor: ...
- @overload
- def addr(beta: Number, self: Tensor, alpha: Number, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addr(beta: Number, self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor: ...
- @overload
- def addr(beta: Number, self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- def adjoint(input: Tensor) -> Tensor: ...
- def affine_grid_generator(theta: Tensor, size: _size, align_corners: _bool) -> Tensor: ...
- def alias_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def all(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def all(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def allclose(input: Tensor, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> _bool: ...
- def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def amax(input: Tensor, dim: Union[_int, _size]=(), keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def amin(input: Tensor, dim: Union[_int, _size]=(), keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def aminmax(input: Tensor, *, dim: Optional[_int]=None, keepdim: _bool=False, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.aminmax: ...
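# Example (editorial): aminmax returns the min and max in a single pass.
import torch
mn, mx = torch.aminmax(torch.tensor([[1, 5], [3, 2]]), dim=1)
# mn -> tensor([1, 2]); mx -> tensor([5, 3])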
- def angle(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def any(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def any(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def arange(start: Number, end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def arange(end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def arange(end: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def arange(start: Number, end: Number, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def arange(start: Number, end: Number, step: Number=1, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
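# Example (editorial): the overloads cover arange(end), arange(start, end)
# and arange(start, end, step); the end point is always excluded.
import torch
assert torch.equal(torch.arange(5), torch.tensor([0, 1, 2, 3, 4]))
assert torch.equal(torch.arange(1, 4), torch.tensor([1, 2, 3]))
assert torch.equal(torch.arange(0, 1, 0.25), torch.tensor([0.00, 0.25, 0.50, 0.75]))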
- def arccos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def arccos_(input: Tensor) -> Tensor: ...
- def arccosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def arccosh_(input: Tensor) -> Tensor: ...
- def arcsin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def arcsin_(input: Tensor) -> Tensor: ...
- def arcsinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def arcsinh_(input: Tensor) -> Tensor: ...
- def arctan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def arctan_(input: Tensor) -> Tensor: ...
- def arctanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def arctanh_(input: Tensor) -> Tensor: ...
- def argmax(input: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def argmin(input: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def argsort(input: Tensor, *, stable: _bool, dim: _int=-1, descending: _bool=False) -> Tensor: ...
- @overload
- def argsort(input: Tensor, dim: _int=-1, descending: _bool=False) -> Tensor: ...
- @overload
- def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool=False) -> Tensor: ...
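# The first argsort overload above makes `stable` keyword-only: with
# stable=True, elements that compare equal keep their original order.
import torch
t = torch.tensor([1, 0, 1, 0])
torch.argsort(t)               # some valid ascending order
torch.argsort(t, stable=True)  # tensor([1, 3, 0, 2]) -- ties keep input order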
- def argwhere(input: Tensor) -> Tensor: ...
- def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
- def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
- def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
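# as_strided builds a view straight from (size, stride, storage_offset), so a
# view's elements can overlap in memory and writes through it can alias; a
# sketch, not an endorsement of hand-rolled strides:
import torch
x = torch.arange(9.)
torch.as_strided(x, (3, 3), (3, 1))  # an ordinary 3x3 view of x
torch.as_strided(x, (7, 3), (1, 1))  # overlapping length-3 sliding windows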
- def as_tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None) -> Tensor: ...
- def asarray(obj: Any, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, copy: Optional[_bool]=None, requires_grad: _bool=False) -> Tensor: ...
- def asin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def asin_(input: Tensor) -> Tensor: ...
- def asinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def asinh_(input: Tensor) -> Tensor: ...
- def atan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def atan_(input: Tensor) -> Tensor: ...
- def atanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def atanh_(input: Tensor) -> Tensor: ...
- def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, ceil_mode: _bool=False, count_include_pad: _bool=True) -> Tensor: ...
- @overload
- def baddbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor) -> Tensor: ...
- @overload
- def baddbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def baddbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
- @overload
- def baddbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
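# The last baddbmm overload above is the non-deprecated form; it computes
# beta * input + alpha * (batch1 @ batch2) batch-wise.
import torch
M = torch.randn(10, 3, 5)
b1, b2 = torch.randn(10, 3, 4), torch.randn(10, 4, 5)
out = torch.baddbmm(M, b1, b2, beta=0.5, alpha=2.0)
# equivalent to: 0.5 * M + 2.0 * torch.bmm(b1, b2)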
- @overload
- def bartlett_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
- def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], mean_dy: Tensor, mean_dy_xmu: Tensor, count: Tensor) -> Tensor: ...
- def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor]=None) -> Tensor: ...
- def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ...
- def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ...
- def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ...
- def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ...
- @overload
- def bernoulli(input: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator]=None) -> Tensor: ...
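# In the first bernoulli overload each element of `input` is its own success
# probability; in the second, a single probability `p` is used for every
# element and `input` only supplies shape and dtype (a sketch):
import torch
torch.bernoulli(torch.full((2, 2), 0.5))  # 0/1 samples, p=0.5 per element
torch.bernoulli(torch.empty(3), 0.25)     # p=0.25 for all three elements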
- def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor]=None) -> Tensor: ...
- def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, size_average: Optional[bool] = None, reduce: Optional[bool] = None, reduction: str = ..., pos_weight: Optional[Tensor] = None) -> Tensor: ...
- def bincount(input: Tensor, weights: Optional[Tensor]=None, minlength: _int=0) -> Tensor: ...
- def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_and(self: Number, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_and(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_left_shift(self: Number, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_left_shift(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def bitwise_not(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_or(self: Number, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_or(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_right_shift(self: Number, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_right_shift(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bitwise_xor(self: Number, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_xor(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def blackman_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- @overload
- def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool=False, right: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def bucketize(self: Number, boundaries: Tensor, *, out_int32: _bool=False, right: _bool=False) -> Tensor: ...
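# bucketize maps each value to the index of its bucket in the sorted
# `boundaries`; `right` selects which side of a boundary is inclusive.
import torch
boundaries = torch.tensor([1, 3, 5, 7, 9])
v = torch.tensor([3, 6, 9])
torch.bucketize(v, boundaries)              # tensor([1, 3, 4])
torch.bucketize(v, boundaries, right=True)  # tensor([2, 3, 5])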
- def can_cast(from_: _dtype, to: _dtype) -> _bool: ...
- @overload
- def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
- def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def ceil(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def ceil_(input: Tensor) -> Tensor: ...
- def celu(input: Tensor, alpha: Number=1.0) -> Tensor: ...
- def celu_(input: Tensor, alpha: Number=1.0) -> Tensor: ...
- def channel_shuffle(input: Tensor, groups: _int) -> Tensor: ...
- def cholesky(input: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def cholesky_inverse(input: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ...
- def chunk(input: Tensor, chunks: _int, dim: _int=0) -> List[Tensor]: ...
- @overload
- def clamp(input: Tensor, min: Optional[Tensor]=None, max: Optional[Tensor]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp_(input: Tensor, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp_(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
- @overload
- def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp_max(input: Tensor, max: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ...
- @overload
- def clamp_max_(input: Tensor, max: Number) -> Tensor: ...
- @overload
- def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp_min(input: Tensor, min: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ...
- @overload
- def clamp_min_(input: Tensor, min: Number) -> Tensor: ...
- @overload
- def clip(input: Tensor, min: Optional[Tensor]=None, max: Optional[Tensor]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clip(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clip_(input: Tensor, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clip_(input: Tensor, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
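# clamp (and its alias clip) accepts scalar bounds or broadcastable tensor
# bounds, matching the two overload families above; at least one of min/max
# must be supplied.
import torch
x = torch.tensor([-1.0, 0.5, 2.0])
torch.clamp(x, min=0.0, max=1.0)                 # tensor([0.0000, 0.5000, 1.0000])
torch.clamp(x, min=torch.tensor([0., -1., 0.]))  # per-element lower bounds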
- def clone(input: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
- def col_indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
- def combinations(input: Tensor, r: _int=2, with_replacement: _bool=False) -> Tensor: ...
- def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
- def conj(input: Tensor) -> Tensor: ...
- def conj_physical(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def conj_physical_(input: Tensor) -> Tensor: ...
- def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Number=0) -> Tensor: ...
- @overload
- def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
- @overload
- def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
- @overload
- def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
- @overload
- def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
- @overload
- def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
- @overload
- def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor: ...
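# The string-padding conv overloads accept "valid" (no padding) or "same"
# (output keeps the input's spatial size; requires stride 1). A sketch:
import torch
x = torch.randn(1, 3, 8, 8)
w = torch.randn(16, 3, 3, 3)
torch.conv2d(x, w, padding="same").shape  # torch.Size([1, 16, 8, 8])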
- def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int=0) -> Tensor: ...
- def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
- def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
- def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: Union[_int, _size]=0, output_padding: Union[_int, _size]=0, groups: _int=1, dilation: Union[_int, _size]=1) -> Tensor: ...
- def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: Sequence[Union[_int, SymInt]], dilation: _size, transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: _int) -> Tensor: ...
- @overload
- def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def copysign(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def corrcoef(input: Tensor) -> Tensor: ...
- def cos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def cos_(input: Tensor) -> Tensor: ...
- def cosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def cosh_(input: Tensor) -> Tensor: ...
- def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float = ..., size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
- def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int=1, eps: _float=1e-08) -> Tensor: ...
- @overload
- def count_nonzero(input: Tensor, dim: Optional[_int]=None) -> Tensor: ...
- @overload
- def count_nonzero(input: Tensor, dim: _size) -> Tensor: ...
- def cov(input: Tensor, *, correction: _int=1, fweights: Optional[Tensor]=None, aweights: Optional[Tensor]=None) -> Tensor: ...
- def cross(input: Tensor, other: Tensor, dim: Optional[_int]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- def crow_indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: int = ..., reduction: str = ..., zero_infinity: bool = ...) -> Tensor: ...
- def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ...
- def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def cudnn_convolution(input: Tensor, weight: Tensor, padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
- def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Number], bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, groups: _int) -> Tensor: ...
- def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, groups: _int) -> Tensor: ...
- def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
- def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ...
- def cudnn_is_acceptable(input: Tensor) -> _bool: ...
- @overload
- def cummax(input: Tensor, dim: _int, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.cummax: ...
- @overload
- def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.cummax: ...
- @overload
- def cummin(input: Tensor, dim: _int, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.cummin: ...
- @overload
- def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.cummin: ...
- @overload
- def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int=-1) -> Tensor: ...
- @overload
- def cumulative_trapezoid(y: Tensor, *, dx: Number=1, dim: _int=-1) -> Tensor: ...
- def deg2rad(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def deg2rad_(input: Tensor) -> Tensor: ...
- @overload
- def dequantize(input: Tensor) -> Tensor: ...
- @overload
- def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
- def det(input: Tensor) -> Tensor: ...
- def detach(input: Tensor) -> Tensor: ...
- def detach_(input: Tensor) -> Tensor: ...
- def detach_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def diag(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- def diag_embed(input: Tensor, offset: _int=0, dim1: _int=-2, dim2: _int=-1) -> Tensor: ...
- def diagflat(input: Tensor, offset: _int=0) -> Tensor: ...
- @overload
- def diagonal(input: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ...
- @overload
- def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int=0) -> Tensor: ...
- def diagonal_copy(input: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1, *, out: Optional[Tensor]=None) -> Tensor: ...
- def diagonal_scatter(input: Tensor, src: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ...
- def diff(input: Tensor, n: _int=1, dim: _int=-1, prepend: Optional[Tensor]=None, append: Optional[Tensor]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- def digamma(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def dist(input: Tensor, other: Tensor, p: Number=2) -> Tensor: ...
- def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def divide(input: Tensor, other: Number, *, rounding_mode: Optional[str]) -> Tensor: ...
- @overload
- def divide(input: Tensor, other: Number) -> Tensor: ...
- def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def dsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
- @overload
- def dsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
- def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
- def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt]=-1, scale_grad_by_freq: _bool=False, sparse: _bool=False) -> Tensor: ...
- @overload
- def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- @overload
- def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ...
- @overload
- def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def empty_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def eq(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def equal(input: Tensor, other: Tensor) -> _bool: ...
- def erf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def erf_(input: Tensor) -> Tensor: ...
- def erfc(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def erfc_(input: Tensor) -> Tensor: ...
- def erfinv(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def exp(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def exp2(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def exp2_(input: Tensor) -> Tensor: ...
- def exp_(input: Tensor) -> Tensor: ...
- def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- def expm1(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def expm1_(input: Tensor) -> Tensor: ...
- @overload
- def eye(n: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def eye(n: _int, m: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
- @overload
- def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
- @overload
- def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor: ...
- def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
- def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
- def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Number, weight_zero_point: Number, bias: Tensor) -> Tensor: ...
- def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Number, weight_zero_point: Number, bias: Tensor) -> Tensor: ...
- def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ...
- def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ...
- @overload
- def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ...
- @overload
- def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ...
- def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- @overload
- def fill(input: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def fill(input: Tensor, value: Number) -> Tensor: ...
- @overload
- def fill_(input: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def fill_(input: Tensor, value: Number) -> Tensor: ...
- def fix(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def fix_(input: Tensor) -> Tensor: ...
- @overload
- def flatten(input: Tensor, start_dim: _int=0, end_dim: _int=-1) -> Tensor: ...
- @overload
- def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: ...
- @overload
- def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
- @overload
- def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
- def flip(input: Tensor, dims: _size) -> Tensor: ...
- def fliplr(input: Tensor) -> Tensor: ...
- def flipud(input: Tensor) -> Tensor: ...
- @overload
- def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def float_power(self: Number, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def float_power(input: Tensor, exponent: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
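# Unlike pow, float_power always computes in double precision (float64, or
# complex128 for complex inputs), regardless of the input dtypes.
import torch
torch.float_power(torch.tensor([2, 3]), 2)  # tensor([4., 9.], dtype=torch.float64)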
- def floor(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def floor_(input: Tensor) -> Tensor: ...
- def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
- def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def fmod(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def frac(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def frac_(input: Tensor) -> Tensor: ...
- def frexp(input: Tensor, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.frexp: ...
- def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def from_file(filename: str, shared: Optional[_bool]=None, size: Optional[_int]=0, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def from_numpy(ndarray) -> Tensor: ...
- def frombuffer(buffer: Any, *, dtype: _dtype, count: int=-1, offset: int=0, device: Union[_device, str, None]=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def full(size: _size, fill_value: Number, *, out: Optional[Tensor]=None, layout: _layout=strided, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def full(size: _size, fill_value: Number, *, names: List[Union[str, None]], layout: _layout=strided, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def full(size: Sequence[Union[_int, SymInt]], fill_value: Number, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def full(size: _size, fill_value: Number, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def full_like(input: Tensor, fill_value: Number, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool=False, symmetric_quant: _bool=False) -> Tensor: ...
- @overload
- def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
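# gather selects values along `dim` with an index tensor of the same rank:
# for dim=1, out[i][j] = input[i][index[i][j]].
import torch
src = torch.tensor([[1, 2], [3, 4]])
idx = torch.tensor([[0, 0], [1, 0]])
torch.gather(src, 1, idx)  # tensor([[1, 1], [4, 3]])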
- def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def gcd_(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def ge(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def geqrf(input: Tensor, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.geqrf: ...
- def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def get_default_dtype() -> _dtype: ...
- def get_num_interop_threads() -> _int: ...
- def get_num_threads() -> _int: ...
- @overload
- def gradient(input: Tensor, *, spacing: Optional[Number]=None, dim: Optional[_int]=None, edge_order: _int=1) -> List[Tensor]: ...
- @overload
- def gradient(input: Tensor, *, spacing: Sequence[Number], dim: Optional[_int]=None, edge_order: _int=1) -> List[Tensor]: ...
- @overload
- def gradient(input: Tensor, *, spacing: Number, dim: _size, edge_order: _int=1) -> List[Tensor]: ...
- @overload
- def gradient(input: Tensor, *, spacing: Sequence[Number], dim: _size, edge_order: _int=1) -> List[Tensor]: ...
- @overload
- def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int]=None, edge_order: _int=1) -> List[Tensor]: ...
- @overload
- def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int=1) -> List[Tensor]: ...
- @overload
- def gradient(input: Tensor, *, dim: _size, edge_order: _int=1) -> List[Tensor]: ...
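# gradient estimates the derivative with central differences in the interior
# and one-sided differences at the edges, returning one tensor per
# differentiated dimension.
import torch
y = torch.tensor([1., 4., 9., 16.])  # f(x) = x**2 sampled at x = 1..4
(g,) = torch.gradient(y, spacing=1.0)
g  # tensor([3., 4., 6., 7.])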
- @overload
- def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def greater(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def greater_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
- def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
- def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
- def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: _float=1e-05, cudnn_enabled: _bool=True) -> Tensor: ...
- @overload
- def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
- @overload
- def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
- def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def gt(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def hamming_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def hann_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def hardshrink(input: Tensor, lambd: Number=0.5, *, out: Optional[Tensor]=None) -> Tensor: ...
- def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def hinge_embedding_loss(input: Tensor, target: Tensor, margin: float = ..., size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
- def histc(input: Tensor, bins: _int=100, min: Number=0, max: Number=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor]=None, density: _bool=False, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.histogram: ...
- @overload
- def histogram(input: Tensor, bins: _int=100, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.histogram: ...
- @overload
- def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogramdd: ...
- @overload
- def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogramdd: ...
- @overload
- def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogramdd: ...
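# histogram returns a named tuple (hist, bin_edges); `range` is keyword-only
# and defaults to the min/max of the input.
import torch
hist, edges = torch.histogram(torch.tensor([1., 2., 1.]), bins=4, range=(0., 3.))
hist   # tensor([0., 2., 1., 0.])
edges  # tensor([0.0000, 0.7500, 1.5000, 2.2500, 3.0000])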
- def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def hsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
- @overload
- def hsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
- def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
- def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def i0(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def i0_(input: Tensor) -> Tensor: ...
- def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def imag(input: Tensor) -> Tensor: ...
- @overload
- def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Number=1) -> Tensor: ...
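# index_add returns a copy of `input` with rows of `source` (scaled by
# `alpha`) accumulated at the positions in `index` along `dim`.
import torch
x = torch.ones(5, 3)
idx = torch.tensor([0, 4, 2])
src = torch.arange(9.).reshape(3, 3)
torch.index_add(x, 0, idx, src)  # rows 0, 4, 2 of x receive src rows 0, 1, 2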
- @overload
- def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ...
- @overload
- def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def index_fill(input: Tensor, dim: _int, index: Tensor, value: Number) -> Tensor: ...
- @overload
- def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
- def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ...
- def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ...
- def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool=True, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def init_num_threads() -> None: ...
- def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
- def int_repr(input: Tensor) -> Tensor: ...
- def inverse(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def is_complex(input: Tensor) -> _bool: ...
- def is_conj(input: Tensor) -> _bool: ...
- def is_distributed(input: Tensor) -> _bool: ...
- def is_floating_point(input: Tensor) -> _bool: ...
- def is_grad_enabled() -> _bool: ...
- def is_inference(input: Tensor) -> _bool: ...
- def is_inference_mode_enabled() -> _bool: ...
- def is_neg(input: Tensor) -> _bool: ...
- def is_nonzero(input: Tensor) -> _bool: ...
- def is_same_size(input: Tensor, other: Tensor) -> _bool: ...
- def is_signed(input: Tensor) -> _bool: ...
- def is_vulkan_available() -> _bool: ...
- def isclose(input: Tensor, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> Tensor: ...
- def isfinite(input: Tensor) -> Tensor: ...
- @overload
- def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool=False, invert: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def isin(element: Number, test_elements: Tensor, *, assume_unique: _bool=False, invert: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def isin(elements: Tensor, test_element: Number, *, assume_unique: _bool=False, invert: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
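# isin tests membership elementwise; either side may be a scalar, and
# invert=True flips the result.
import torch
torch.isin(torch.tensor([1, 2, 3, 4]), torch.tensor([2, 4]))
# tensor([False,  True, False,  True])
torch.isin(torch.tensor([1, 2, 3]), torch.tensor([2]), invert=True)
# tensor([ True, False,  True])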
- def isinf(input: Tensor) -> Tensor: ...
- def isnan(input: Tensor) -> Tensor: ...
- def isneginf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def isposinf(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def isreal(input: Tensor) -> Tensor: ...
- def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int]=None, win_length: Optional[_int]=None, window: Optional[Tensor]=None, center: _bool=True, normalized: _bool=False, onesided: Optional[_bool]=None, length: Optional[_int]=None, return_complex: _bool=False) -> Tensor: ...
- @overload
- def kaiser_window(window_length: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def kl_div(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ..., log_target: bool = ...) -> Tensor: ...
- def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def kthvalue(input: Tensor, k: _int, dim: _int=-1, keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.kthvalue: ...
- @overload
- def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.kthvalue: ...
- def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: _float=1e-05, cudnn_enable: _bool=True) -> Tensor: ...
- def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def lcm_(input: Tensor, other: Tensor) -> Tensor: ...
- def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def ldexp_(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def le(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def le(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def lerp(input: Tensor, end: Tensor, weight: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def less(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def less(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def less_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def lgamma(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def linspace(start: Number, end: Number, steps: Optional[_int]=None, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def linspace(start: Number, end: Number, steps: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def log(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def log10(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def log10_(input: Tensor) -> Tensor: ...
- def log1p(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def log1p_(input: Tensor) -> Tensor: ...
- def log2(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def log2_(input: Tensor) -> Tensor: ...
- def log_(input: Tensor) -> Tensor: ...
- @overload
- def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
- def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor]=None) -> Tensor: ...
- def logdet(input: Tensor) -> Tensor: ...
- def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def logical_not(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def logit(input: Tensor, eps: Optional[_float]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- def logit_(input: Tensor, eps: Optional[_float]=None) -> Tensor: ...
- @overload
- def logspace(start: Number, end: Number, steps: Optional[_int]=None, base: _float=10.0, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def logspace(start: Number, end: Number, steps: _int, base: _float=10.0, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
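# logsumexp computes log(sum(exp(input), dim)) in a numerically stable way,
# which matters once the inputs are large enough that exp() alone overflows.
import torch
x = torch.tensor([[1000., 1001.], [0., 1.]])
torch.logsumexp(x, dim=1)       # tensor([1001.3133,    1.3133])
torch.log(torch.exp(x).sum(1))  # tensor([inf, 1.3133]) -- naive form overflows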
- @overload
- def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
- @overload
- def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
- def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tuple[Tensor, Tensor]: ...
- @overload
- def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def lt(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool=True, unpack_pivots: _bool=True, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.lu_unpack: ...
- def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float = ..., size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
- @overload
- def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def masked_fill(input: Tensor, mask: Tensor, value: Number) -> Tensor: ...
- def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ...
- def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def matrix_exp(input: Tensor) -> Tensor: ...
- def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def max(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def max(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def max(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.max: ...
- @overload
- def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.max: ...
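# The dim overloads of max return torch.return_types.max, a named tuple of
# (values, indices) that also unpacks like a plain tuple.
import torch
x = torch.tensor([[1., 5.], [4., 2.]])
res = torch.max(x, dim=1)
res.values, res.indices  # (tensor([5., 4.]), tensor([1, 0]))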
- def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
- def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tuple[Tensor, Tensor]: ...
- def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
- def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
- def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def mean(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def median(input: Tensor) -> Tensor: ...
- @overload
- def median(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.median: ...
- @overload
- def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.median: ...
- @overload
- def min(input: Tensor) -> Tensor: ...
- @overload
- def min(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def min(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.min: ...
- @overload
- def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.min: ...
- def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
- def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
- def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Number], bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, groups: _int) -> Tensor: ...
- def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, groups: _int) -> Tensor: ...
- def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
- def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: _size, dilation: _size, groups: _int, benchmark: _bool, deterministic: _bool) -> Tensor: ...
- def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
- def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor]=None) -> Tensor: ...
- def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: _size, dilation: _size, groups: _int) -> Tensor: ...
- def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ...
- def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
- def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
- def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def mode(input: Tensor, dim: _int=-1, keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.mode: ...
- @overload
- def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.mode: ...
- @overload
- def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor: ...
- @overload
- def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor: ...
- @overload
- def movedim(input: Tensor, source: _int, destination: _int) -> Tensor: ...
- @overload
- def movedim(input: Tensor, source: _size, destination: _size) -> Tensor: ...
- def msort(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def mul(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
- def multinomial(input: Tensor, num_samples: _int, replacement: _bool=False, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def multiply(input: Tensor, other: Number) -> Tensor: ...
- def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- def nan_to_num(input: Tensor, nan: Optional[_float]=None, posinf: Optional[_float]=None, neginf: Optional[_float]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- def nan_to_num_(input: Tensor, nan: Optional[_float]=None, posinf: Optional[_float]=None, neginf: Optional[_float]=None) -> Tensor: ...
- def nanmean(input: Tensor, dim: Optional[Union[_int, _size]]=None, keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def nanmedian(input: Tensor) -> Tensor: ...
- @overload
- def nanmedian(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.nanmedian: ...
- @overload
- def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.nanmedian: ...
- @overload
- def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear", out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def nanquantile(input: Tensor, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear", out: Optional[Tensor]=None) -> Tensor: ...
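- # Sketch (assumed example): `nanquantile` behaves like `quantile` but ignores NaNs;
- # `q` may be a float or a tensor of probabilities in [0, 1].
- #   >>> x = torch.tensor([1., float('nan'), 3.])
- #   >>> torch.nanquantile(x, 0.5)   # tensor(2.), the NaN is skipped
- #   >>> torch.quantile(x, 0.5)      # tensor(nan), NaNs propagate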
- def nansum(input: Tensor, dim: Optional[Union[_int, _size]]=None, keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: ...
- @overload
- def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ...
- def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor]=None) -> Tensor: ...
- def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> Tuple[Tensor, Tensor, Tensor]: ...
- def native_channel_shuffle(input: Tensor, groups: _int) -> Tensor: ...
- def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ...
- def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
- def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
- @overload
- def native_norm(input: Tensor, p: Optional[Number], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ...
- @overload
- def native_norm(input: Tensor, p: Number=2) -> Tensor: ...
- @overload
- def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def ne(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def neg(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def neg_(input: Tensor) -> Tensor: ...
- def negative(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def negative_(input: Tensor) -> Tensor: ...
- def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def nonzero(input: Tensor, *, as_tuple: Literal[False]=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ...
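- # Sketch (assumed example): `as_tuple` selects between the two `nonzero` overloads,
- # which is why the stub types it with Literal[False]/Literal[True].
- #   >>> m = torch.tensor([[1, 0], [0, 2]])
- #   >>> torch.nonzero(m)                 # tensor([[0, 0], [1, 1]]), one row per nonzero
- #   >>> torch.nonzero(m, as_tuple=True)  # (tensor([0, 1]), tensor([0, 1]))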
- def norm_except_dim(v: Tensor, pow: _int=2, dim: _int=0) -> Tensor: ...
- @overload
- def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def normal(mean: Tensor, std: _float=1, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def normal(mean: _float, std: Tensor, *, generator: Optional[Generator]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
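- # Sketch (assumed example): `normal` lets mean and std each be a tensor or a float;
- # the all-float overload requires an explicit `size`.
- #   >>> torch.normal(torch.zeros(3), torch.ones(3))  # per-element mean and std
- #   >>> torch.normal(0.0, torch.ones(2))             # scalar mean, tensor std
- #   >>> torch.normal(0.0, 1.0, size=(2, 3))          # shape given explicitly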
- @overload
- def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def not_equal(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def nuclear_norm(input: Tensor, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def numel(self: Tensor) -> _int: ...
- @overload
- def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def ones_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
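- # Sketch (assumed example): `ones` accepts either a size sequence or varargs ints,
- # mirroring the overloads above; `ones_like` copies shape, dtype and device.
- #   >>> torch.ones(2, 3)
- #   >>> torch.ones((2, 3), dtype=torch.float64)
- #   >>> torch.ones_like(torch.empty(2, 3, dtype=torch.int64))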
- def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool=True, transpose: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def pairwise_distance(x1: Tensor, x2: Tensor, p: _float=2, eps: _float=1e-06, keepdim: _bool=False) -> Tensor: ...
- def pdist(input: Tensor, p: _float=2) -> Tensor: ...
- def permute(input: Tensor, dims: _size) -> Tensor: ...
- def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor]=None) -> Tensor: ...
- def pinverse(input: Tensor, rcond: _float=1e-15) -> Tensor: ...
- def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ...
- def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ...
- def poisson(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
- def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ...
- def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def positive(input: Tensor) -> Tensor: ...
- @overload
- def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def pow(self: Number, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def pow(input: Tensor, exponent: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def prelu(input: Tensor, weight: Tensor) -> Tensor: ...
- @overload
- def prod(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def prod(input: Tensor, dim: _int, keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- def promote_types(type1: _dtype, type2: _dtype) -> _dtype: ...
- def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool=False) -> Tensor: ...
- def q_per_channel_axis(input: Tensor) -> _int: ...
- def q_per_channel_scales(input: Tensor) -> Tensor: ...
- def q_per_channel_zero_points(input: Tensor) -> Tensor: ...
- def q_scale(input: Tensor) -> _float: ...
- def q_zero_point(input: Tensor) -> _int: ...
- def qr(input: Tensor, some: _bool=True, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.qr: ...
- @overload
- def quantile(input: Tensor, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear", out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def quantile(input: Tensor, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear", out: Optional[Tensor]=None) -> Tensor: ...
- def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor: ...
- @overload
- def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor: ...
- @overload
- def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor: ...
- @overload
- def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> List[Tensor]: ...
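- # Sketch (assumed example): `quantize_per_tensor` maps floats to a quantized dtype
- # via round(x / scale) + zero_point; scale and zero_point may also be tensors.
- #   >>> q = torch.quantize_per_tensor(torch.tensor([0.0, 0.5, 1.0]),
- #   ...                               scale=0.1, zero_point=0, dtype=torch.quint8)
- #   >>> q.int_repr()   # tensor([ 0,  5, 10], dtype=torch.uint8)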
- def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor: ...
- def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor: ...
- def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
- def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tuple[Tensor, Tensor]: ...
- def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
- def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size]=(), padding: Union[_int, _size]=0, dilation: Union[_int, _size]=1, ceil_mode: _bool=False) -> Tensor: ...
- def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
- def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Number, scale_hh: Number, zero_point_ih: Number, zero_point_hh: Number) -> Tensor: ...
- def rad2deg(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def rad2deg_(input: Tensor) -> Tensor: ...
- @overload
- def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def rand(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def rand_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def randint(high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def randint(high: _int, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randint(high: _int, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randint(low: _int, high: _int, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randint(low: _int, high: _int, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
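- # Sketch (assumed example): `randint(high, size)` samples uniformly from [0, high);
- # the two-bound overloads sample from [low, high).
- #   >>> torch.randint(10, (2, 2))     # values in 0..9
- #   >>> torch.randint(3, 10, (2, 2))  # values in 3..9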
- @overload
- def randint_like(input: Tensor, high: _int, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randint_like(input: Tensor, low: _int, high: _int, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def randn_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randperm(n: _int, *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def randperm(n: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def range(start: Number, end: Number, step: Number=1, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- def ravel(input: Tensor) -> Tensor: ...
- def real(input: Tensor) -> Tensor: ...
- def reciprocal(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def reciprocal_(input: Tensor) -> Tensor: ...
- def relu(input: Tensor) -> Tensor: ...
- def relu_(input: Tensor) -> Tensor: ...
- @overload
- def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def remainder(self: Number, other: Tensor) -> Tensor: ...
- @overload
- def remainder(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def renorm(input: Tensor, p: Number, dim: _int, maxnorm: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int]=None, *, output_size: Optional[_int]=None) -> Tensor: ...
- @overload
- def repeat_interleave(repeats: Tensor, *, output_size: Optional[_int]=None) -> Tensor: ...
- @overload
- def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int]=None, *, output_size: Optional[_int]=None) -> Tensor: ...
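- # Sketch (assumed example): `repeats` may be a single int or a per-element tensor,
- # matching the overloads above; without `dim` the input is flattened first.
- #   >>> torch.repeat_interleave(torch.tensor([1, 2]), 2)                     # tensor([1, 1, 2, 2])
- #   >>> torch.repeat_interleave(torch.tensor([1, 2]), torch.tensor([1, 2])) # tensor([1, 2, 2])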
- def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
- def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ...
- def resolve_conj(input: Tensor) -> Tensor: ...
- def resolve_neg(input: Tensor) -> Tensor: ...
- @overload
- def result_type(tensor: Tensor, other: Tensor) -> _dtype: ...
- @overload
- def result_type(scalar: Number, tensor: Tensor) -> _dtype: ...
- @overload
- def result_type(tensor: Tensor, other: Number) -> _dtype: ...
- @overload
- def result_type(scalar1: Number, scalar2: Number) -> _dtype: ...
- @overload
- def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
- @overload
- def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
- def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
- @overload
- def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
- def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor]=None, b_hh: Optional[Tensor]=None) -> Tensor: ...
- def roll(input: Tensor, shifts: Union[_int, _size], dims: Union[_int, _size]=()) -> Tensor: ...
- def rot90(input: Tensor, k: _int=1, dims: _size=(0, 1)) -> Tensor: ...
- @overload
- def round(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def round(input: Tensor, *, decimals: _int, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def round_(input: Tensor) -> Tensor: ...
- @overload
- def round_(input: Tensor, *, decimals: _int) -> Tensor: ...
- def row_indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
- def rrelu(input: Tensor, lower: Number=0.125, upper: Number=0.3333333333333333, training: _bool=False, generator: Optional[Generator]=None) -> Tensor: ...
- def rrelu_(input: Tensor, lower: Number=0.125, upper: Number=0.3333333333333333, training: _bool=False, generator: Optional[Generator]=None) -> Tensor: ...
- def rsqrt(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def rsqrt_(input: Tensor) -> Tensor: ...
- @overload
- def rsub(input: Tensor, other: Tensor, *, alpha: Number=1) -> Tensor: ...
- @overload
- def rsub(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
- def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- def scalar_tensor(s: Number, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def scatter(input: Tensor, dim: _int, index: Tensor, value: Number, *, reduce: str, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
- @overload
- def scatter(input: Tensor, dim: _int, index: Tensor, value: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
- @overload
- def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
- def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool=True, out: Optional[Tensor]=None) -> Tensor: ...
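- # Sketch (assumed example): the scatter family writes `src` into `input` at positions
- # given by `index` along `dim`; `scatter_add` accumulates instead of overwriting, and
- # `scatter_reduce` generalizes the accumulation ("sum", "prod", "amax", ...).
- #   >>> torch.scatter_add(torch.zeros(3), 0, torch.tensor([0, 0, 2]),
- #   ...                   torch.tensor([1., 2., 3.]))   # tensor([3., 0., 3.])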
- @overload
- def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool=False, right: _bool=False, side: Optional[str]=None, sorter: Optional[Tensor]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def searchsorted(sorted_sequence: Tensor, self: Number, *, out_int32: _bool=False, right: _bool=False, side: Optional[str]=None, sorter: Optional[Tensor]=None) -> Tensor: ...
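- # Sketch (assumed example): `searchsorted` returns insertion points that keep
- # `sorted_sequence` ordered; `right=True` (or side="right") breaks ties to the right.
- #   >>> torch.searchsorted(torch.tensor([1, 3, 5, 7]), torch.tensor([2, 6]))  # tensor([1, 3])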
- def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor]=None, indices: Optional[Tensor]=None, offsets: Optional[Tensor]=None, axis: _int=0, unsafe: _bool=False, initial: Optional[Number]=None) -> Tensor: ...
- @overload
- def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ...
- @overload
- def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor: ...
- def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor]=None) -> Tensor: ...
- def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ...
- def selu(input: Tensor) -> Tensor: ...
- def selu_(input: Tensor) -> Tensor: ...
- def set_flush_denormal(mode: _bool) -> _bool: ...
- def set_num_interop_threads(num: _int) -> None: ...
- def set_num_threads(num: _int) -> None: ...
- def sgn(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def sigmoid(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def sigmoid_(input: Tensor) -> Tensor: ...
- def sign(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def signbit(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def sin(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def sin_(input: Tensor) -> Tensor: ...
- def sinc(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def sinc_(input: Tensor) -> Tensor: ...
- def sinh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def sinh_(input: Tensor) -> Tensor: ...
- def slice_copy(input: Tensor, dim: _int=0, start: Optional[Union[_int, SymInt]]=None, end: Optional[Union[_int, SymInt]]=None, step: Union[_int, SymInt]=1, *, out: Optional[Tensor]=None) -> Tensor: ...
- def slice_scatter(input: Tensor, src: Tensor, dim: _int=0, start: Optional[Union[_int, SymInt]]=None, end: Optional[Union[_int, SymInt]]=None, step: Union[_int, SymInt]=1) -> Tensor: ...
- def slogdet(input: Tensor, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.slogdet: ...
- def smm(input: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def sort(input: Tensor, *, stable: Optional[_bool], dim: _int=-1, descending: _bool=False, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.sort: ...
- @overload
- def sort(input: Tensor, dim: _int=-1, descending: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.sort: ...
- @overload
- def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool=False, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.sort: ...
- @overload
- def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.sort: ...
- def sparse_bsc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False, check_invariants: Optional[_bool]=None) -> Tensor: ...
- def sparse_bsr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False, check_invariants: Optional[_bool]=None) -> Tensor: ...
- def sparse_compressed_tensor(compressed_indices: Union[Tensor, List], plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False, check_invariants: Optional[_bool]=None) -> Tensor: ...
- def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False, check_invariants: Optional[_bool]=None) -> Tensor: ...
- def sparse_csc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False, check_invariants: Optional[_bool]=None) -> Tensor: ...
- def sparse_csr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size]=None, *, dtype: Optional[_dtype]=None, device: Union[_device, str, None]=None, requires_grad: _bool=False, check_invariants: Optional[_bool]=None) -> Tensor: ...
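- # Sketch (assumed example): `sparse_coo_tensor` takes a 2 x nnz coordinate matrix plus
- # the matching values; the compressed constructors above take compressed/plain index
- # arrays instead.
- #   >>> i = torch.tensor([[0, 1], [1, 0]])   # coordinates of two nonzeros
- #   >>> v = torch.tensor([3., 4.])
- #   >>> torch.sparse_coo_tensor(i, v, (2, 2)).to_dense()   # tensor([[0., 3.], [4., 0.]])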
- def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int=0, *, out: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]=None) -> None: ...
- def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
- def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int=0, *, out: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]=None) -> None: ...
- def spmm(input: Tensor, mat2: Tensor) -> Tensor: ...
- def sqrt(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def sqrt_(input: Tensor) -> Tensor: ...
- def square(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def square_(input: Tensor) -> Tensor: ...
- @overload
- def squeeze(input: Tensor) -> Tensor: ...
- @overload
- def squeeze(input: Tensor, dim: _int) -> Tensor: ...
- @overload
- def squeeze(input: Tensor, dim: _size) -> Tensor: ...
- @overload
- def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor: ...
- @overload
- def squeeze_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def sspaddmm(beta: Number, self: Tensor, alpha: Number, mat1: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def sspaddmm(beta: Number, self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def std(input: Tensor, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def std(input: Tensor, unbiased: _bool=True) -> Tensor: ...
- @overload
- def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def std_mean(input: Tensor, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def std_mean(input: Tensor, unbiased: _bool=True) -> Tuple[Tensor, Tensor]: ...
- @overload
- def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
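- # Sketch (assumed example): `unbiased` and the newer `correction` keyword both control
- # Bessel's correction; correction=1 matches unbiased=True, correction=0 the population
- # estimate. `std_mean`/`var_mean` compute the statistic and the mean in one pass.
- #   >>> t = torch.tensor([1., 2., 3., 4.])
- #   >>> torch.std(t, correction=0)   # population standard deviation
- #   >>> torch.std_mean(t)            # (std, mean)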
- @overload
- def sub(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def sub(self: Tensor, alpha: Number, other: Tensor) -> Tensor: ...
- @overload
- def sub(self: Tensor, alpha: Number, other: Tensor, *, out: Tensor) -> Tensor: ...
- @overload
- def subtract(input: Tensor, other: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def subtract(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
- @overload
- def sum(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor: ...
- def svd(input: Tensor, some: _bool=True, compute_uv: _bool=True, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.svd: ...
- def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor: ...
- def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
- def t(input: Tensor) -> Tensor: ...
- def t_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def take(input: Tensor, index: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int]=None, *, out: Optional[Tensor]=None) -> Tensor: ...
- def tan(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def tan_(input: Tensor) -> Tensor: ...
- def tanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def tanh_(input: Tensor) -> Tensor: ...
- def tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int=0) -> List[Tensor]: ...
- @overload
- def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int=0) -> List[Tensor]: ...
- @overload
- def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
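- # Sketch (assumed example): an int selects the `sections` overload (sections may be
- # unequal, unlike `split`), while a sequence selects the `indices` overload.
- #   >>> torch.tensor_split(torch.arange(7), 3)       # sizes 3, 2, 2
- #   >>> torch.tensor_split(torch.arange(7), (1, 5))  # slices [0:1], [1:5], [5:7]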
- def threshold(input: Tensor, threshold: Number, value: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- def threshold_(input: Tensor, threshold: Number, value: Number) -> Tensor: ...
- def tile(input: Tensor, dims: _size) -> Tensor: ...
- def topk(input: Tensor, k: _int, dim: _int=-1, largest: _bool=True, sorted: _bool=True, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.topk: ...
- def trace(input: Tensor) -> Tensor: ...
- @overload
- def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
- @overload
- def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: ...
- def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def trapezoid(y: Tensor, x: Tensor, *, dim: _int=-1) -> Tensor: ...
- @overload
- def trapezoid(y: Tensor, *, dx: Number=1, dim: _int=-1) -> Tensor: ...
- @overload
- def trapz(y: Tensor, *, dx: _float=1, dim: _int=-1) -> Tensor: ...
- @overload
- def trapz(y: Tensor, x: Tensor, *, dim: _int=-1) -> Tensor: ...
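- # Sketch (assumed example): `trapezoid` integrates `y` by the trapezoidal rule with
- # unit spacing, an explicit `dx`, or sample coordinates `x`; `trapz` is the legacy alias.
- #   >>> torch.trapezoid(torch.tensor([1., 2., 3.]))           # tensor(4.)
- #   >>> torch.trapezoid(torch.tensor([1., 2., 3.]), dx=0.5)   # tensor(2.)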
- def triangular_solve(input: Tensor, A: Tensor, upper: _bool=True, transpose: _bool=False, unitriangular: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types.triangular_solve: ...
- def tril(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- def tril_indices(row: _int, col: _int, offset: _int=0, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: float = ..., p: float = ..., eps: float = ..., swap: bool = ..., size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
- def triu(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
- def triu_indices(row: _int, col: _int, offset: _int=0, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor]=None) -> Tensor: ...
- def trunc(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def trunc_(input: Tensor) -> Tensor: ...
- @overload
- def unbind(input: Tensor, dim: _int=0) -> List[Tensor]: ...
- @overload
- def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> List[Tensor]: ...
- def unbind_copy(input: Tensor, dim: _int=0, *, out: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]=None) -> None: ...
- @overload
- def unflatten(input: Tensor, dim: _int, sizes: _size) -> Tensor: ...
- @overload
- def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: _size, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
- def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- def unique_dim(input: Tensor, dim: _int, sorted: _bool=True, return_inverse: _bool=False, return_counts: _bool=False) -> Tuple[Tensor, Tensor, Tensor]: ...
- def unsafe_chunk(input: Tensor, chunks: _int, dim: _int=0) -> List[Tensor]: ...
- def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int=0) -> List[Tensor]: ...
- def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
- def unsqueeze(input: Tensor, dim: _int) -> Tensor: ...
- def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
- def values_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def vander(x: Tensor, N: Optional[_int]=None, increasing: _bool=False) -> Tensor: ...
- @overload
- def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def var(input: Tensor, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def var(input: Tensor, unbiased: _bool=True) -> Tensor: ...
- @overload
- def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def var_mean(input: Tensor, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def var_mean(input: Tensor, unbiased: _bool=True) -> Tuple[Tensor, Tensor]: ...
- @overload
- def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
- def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def view_as_complex(input: Tensor) -> Tensor: ...
- def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- def view_as_real(input: Tensor) -> Tensor: ...
- def view_as_real_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def vsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
- @overload
- def vsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
- def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def where(condition: Tensor) -> List[Tensor]: ...
- @overload
- def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def where(condition: Tensor, self: Number, other: Tensor) -> Tensor: ...
- @overload
- def where(condition: Tensor, input: Tensor, other: Number) -> Tensor: ...
- @overload
- def where(condition: Tensor, self: Number, other: Number) -> Tensor: ...
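- # Sketch (assumed example): the one-argument form of `where` is shorthand for
- # nonzero(as_tuple=True); the ternary forms select elementwise and accept Number
- # scalars for either branch, per the overloads above.
- #   >>> x = torch.tensor([-1., 2.])
- #   >>> torch.where(x > 0, x, 0.0)   # tensor([0., 2.])
- #   >>> torch.where(x > 0)           # (tensor([1]),)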
- @overload
- def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def xlogy(self: Number, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def xlogy(input: Tensor, other: Number, *, out: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def xlogy_(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def xlogy_(input: Tensor, other: Number) -> Tensor: ...
- def zero_(input: Tensor) -> Tensor: ...
- @overload
- def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def zeros_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- __all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d',
- '_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation',
- '_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async',
- '_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char', '_cast_Double',
- '_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short',
- '_choose_qparams_per_tensor', '_chunk_grad_outputs_efficient_attention', '_coalesce',
- '_compute_linear_combination', '_conj', '_conj_copy', '_conj_physical',
- '_convert_indices_from_coo_to_csr', '_convert_indices_from_csr_to_coo', '_convolution',
- '_convolution_mode', '_copy_from', '_copy_from_and_resize', '_ctc_loss', '_cudnn_ctc_loss',
- '_cudnn_init_dropout_state', '_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache',
- '_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size',
- '_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange',
- '_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag',
- '_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized',
- '_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine',
- '_fake_quantize_learnable_per_tensor_affine',
- '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c',
- '_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos', '_foreach_acos_', '_foreach_add',
- '_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_', '_foreach_addcmul', '_foreach_addcmul_',
- '_foreach_asin', '_foreach_asin_', '_foreach_atan', '_foreach_atan_', '_foreach_ceil',
- '_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_', '_foreach_clamp_min',
- '_foreach_clamp_min_', '_foreach_cos', '_foreach_cos_', '_foreach_cosh', '_foreach_cosh_',
- '_foreach_div', '_foreach_div_', '_foreach_erf', '_foreach_erf_', '_foreach_erfc',
- '_foreach_erfc_', '_foreach_exp', '_foreach_exp_', '_foreach_expm1', '_foreach_expm1_',
- '_foreach_floor', '_foreach_floor_', '_foreach_frac', '_foreach_frac_', '_foreach_lerp',
- '_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_', '_foreach_log', '_foreach_log10',
- '_foreach_log10_', '_foreach_log1p', '_foreach_log1p_', '_foreach_log2', '_foreach_log2_',
- '_foreach_log_', '_foreach_maximum', '_foreach_maximum_', '_foreach_minimum', '_foreach_minimum_',
- '_foreach_mul', '_foreach_mul_', '_foreach_neg', '_foreach_neg_', '_foreach_norm',
- '_foreach_reciprocal', '_foreach_reciprocal_', '_foreach_round', '_foreach_round_',
- '_foreach_sigmoid', '_foreach_sigmoid_', '_foreach_sin', '_foreach_sin_', '_foreach_sinh',
- '_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_', '_foreach_sub', '_foreach_sub_',
- '_foreach_tan', '_foreach_tan_', '_foreach_tanh', '_foreach_tanh_', '_foreach_trunc',
- '_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor', '_fused_adam_', '_fused_adamw_',
- '_fused_dropout', '_fused_moving_avg_obs_fq_helper',
- '_fused_sdp_choice', '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback',
- '_has_compatible_shallow_copy_type', '_histogramdd_bin_edges', '_histogramdd_from_bin_cts',
- '_histogramdd_from_bin_tensors', '_index_put_impl_', '_indices_copy', '_is_all_true',
- '_is_any_true', '_is_functional_tensor', '_is_zerotensor', '_linalg_check_errors', '_linalg_det',
- '_linalg_eigh', '_linalg_slogdet', '_linalg_solve_ex', '_linalg_svd', '_log_softmax',
- '_log_softmax_backward_data', '_logcumsumexp', '_lstm_mps', '_lu_with_info',
- '_make_dual', '_make_dual_copy', '_make_per_channel_quantized_tensor',
- '_make_per_tensor_quantized_tensor', '_masked_scale', '_masked_softmax', '_mkldnn_reshape',
- '_mkldnn_transpose', '_mkldnn_transpose_', '_mps_convolution', '_mps_convolution_transpose',
- '_native_batch_norm_legit', '_native_decoder_only_multi_head_attention',
- '_native_multi_head_attention', '_neg_view', '_neg_view_copy', '_nested_from_padded',
- '_nested_from_padded_and_nested_example', '_nested_tensor_from_mask',
- '_nested_tensor_from_mask_left_aligned', '_nested_tensor_from_tensor_list',
- '_nested_tensor_softmax_with_shape', '_nnpack_available', '_nnpack_spatial_convolution',
- '_pack_padded_sequence', '_pad_packed_sequence', '_pin_memory', '_prelu_kernel',
- '_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor', '_resize_output_',
- '_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16',
- '_scaled_dot_product_attention_math', '_scaled_dot_product_efficient_attention',
- '_scaled_dot_product_flash_attention', '_shape_as_tensor',
- '_sobol_engine_draw', '_sobol_engine_ff_', '_sobol_engine_initialize_state_',
- '_sobol_engine_scramble_', '_softmax', '_softmax_backward_data', '_sparse_broadcast_to',
- '_sparse_broadcast_to_copy', '_sparse_csr_prod', '_sparse_csr_sum',
- '_sparse_log_softmax_backward_data', '_sparse_softmax_backward_data', '_sparse_sparse_matmul',
- '_sparse_sum', '_stack', '_standard_gamma', '_standard_gamma_grad', '_sync',
- '_test_autograd_multiple_dispatch', '_test_autograd_multiple_dispatch_view',
- '_test_autograd_multiple_dispatch_view_copy', '_test_check_tensor', '_test_serialization_subcmul',
- '_to_cpu', '_to_functional_tensor', '_transform_bias_rescale_qkv',
- '_transformer_decoder_only_layer_fwd', '_transformer_encoder_layer_fwd', '_trilinear',
- '_triton_multi_head_attention', '_triton_scaled_dot_attention', '_unique', '_unique2',
- '_unpack_dual', '_use_cudnn_ctc_loss', '_use_cudnn_rnn_flatten_weight',
- '_validate_compressed_sparse_indices', '_validate_sparse_bsc_tensor_args',
- '_validate_sparse_bsr_tensor_args', '_validate_sparse_compressed_tensor_args',
- '_validate_sparse_coo_tensor_args', '_validate_sparse_csc_tensor_args',
- '_validate_sparse_csr_tensor_args', '_values_copy', '_weight_norm', '_weight_norm_interface',
- 'abs', 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', 'adaptive_avg_pool1d',
- 'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addmv_', 'addr',
- 'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose', 'alpha_dropout',
- 'alpha_dropout_', 'amax', 'amin', 'aminmax', 'angle', 'any', 'arange', 'arccos',
- 'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan2',
- 'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere', 'as_strided',
- 'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray', 'asin', 'asin_',
- 'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', 'baddbmm',
- 'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce',
- 'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts',
- 'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', 'bilinear',
- 'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', 'bitwise_left_shift',
- 'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'blackman_window', 'bmm',
- 'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy', 'ceil', 'ceil_', 'celu',
- 'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse', 'cholesky_solve',
- 'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', 'clamp_max_', 'clamp_min',
- 'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy', 'column_stack', 'combinations',
- 'complex', 'concat', 'concatenate', 'conj', 'conj_physical', 'conj_physical_', 'constant_pad_nd',
- 'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d',
- 'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_', 'cosh', 'cosh_',
- 'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross', 'crow_indices_copy',
- 'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', 'cudnn_convolution',
- 'cudnn_convolution_add_relu', 'cudnn_convolution_relu', 'cudnn_convolution_transpose',
- 'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummax', 'cummin', 'cummin', 'cumprod',
- 'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_',
- 'detach_copy', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'diagonal_copy', 'diagonal_scatter',
- 'diff', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dropout_', 'dsmm', 'dsplit',
- 'dstack', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty', 'empty_like',
- 'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_', 'erfc', 'erfc_', 'erfinv', 'exp',
- 'exp2', 'exp2_', 'exp_', 'expand_copy', 'expm1', 'expm1_', 'eye',
- 'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight',
- 'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight',
- 'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight',
- 'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout',
- 'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_',
- 'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax',
- 'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy',
- 'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_',
- 'ge', 'geqrf', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads',
- 'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d',
- 'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside',
- 'hinge_embedding_loss', 'histc', 'histogram', 'histogram', 'histogramdd', 'histogramdd', 'hsmm',
- 'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add',
- 'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select',
- 'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex',
- 'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference',
- 'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed',
- 'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf',
- 'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'kthvalue', 'layer_norm', 'lcm',
- 'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log',
- 'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp',
- 'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
- 'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack',
- 'lu_unpack', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul',
- 'matrix_exp', 'matrix_power', 'max', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d',
- 'max_pool3d', 'maximum', 'mean', 'median', 'median', 'min', 'min', 'minimum', 'miopen_batch_norm',
- 'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu',
- 'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn',
- 'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights',
- 'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'mode', 'moveaxis',
- 'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num',
- 'nan_to_num_', 'nanmean', 'nanmedian', 'nanmedian', 'nanquantile', 'nansum', 'narrow',
- 'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout',
- 'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative',
- 'negative_', 'nextafter', 'nonzero', 'norm_except_dim', 'normal', 'not_equal', 'nuclear_norm',
- 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance', 'pdist', 'permute',
- 'permute_copy', 'pinverse', 'pixel_shuffle', 'pixel_unshuffle', 'poisson', 'poisson_nll_loss',
- 'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types', 'put',
- 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale',
- 'q_zero_point', 'qr', 'qr', 'quantile', 'quantize_per_channel', 'quantize_per_tensor',
- 'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell',
- 'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_rnn_relu_cell',
- 'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like',
- 'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu',
- 'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_',
- 'resolve_conj', 'resolve_neg', 'result_type', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh',
- 'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu',
- 'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add',
- 'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter',
- 'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn',
- 'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_',
- 'slice_copy', 'slice_scatter', 'slogdet', 'slogdet', 'smm', 'softmax', 'sort', 'sort',
- 'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor',
- 'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes',
- 'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy',
- 'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'svd', 'swapaxes',
- 'swapdims', 't', 't_copy', 'take', 'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor',
- 'tensor_split', 'threshold', 'threshold_', 'tile', 'topk', 'topk', 'trace', 'transpose',
- 'transpose_copy', 'trapezoid', 'trapz', 'triangular_solve', 'triangular_solve', 'tril',
- 'tril_indices', 'triplet_margin_loss', 'triu', 'triu_indices', 'true_divide', 'trunc', 'trunc_',
- 'unbind', 'unbind_copy', 'unflatten', 'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split',
- 'unsafe_split_with_sizes', 'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var',
- 'var_mean', 'vdot', 'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy',
- 'view_copy', 'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like']
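For reference, here is a minimal sketch of how a listing of this shape could be reproduced. This is an assumption on my part: the document does not show the exact expression that produced the output above, so the filter below (all callable attributes of the top-level `torch` namespace) is illustrative, not the definitive provenance of the list. Note that the genuine output can contain repeated names (e.g. `'max'`, `'median'`, `'sort'` appear twice above), which a simple `dir()`-based enumeration would not reproduce, since `dir()` yields unique names.

```python
import torch

# Sketch (assumption): enumerate the callable attributes exposed by the
# top-level torch namespace, sorted alphabetically, to approximate the
# listing printed above. This includes functions, builtins, and classes;
# it excludes submodules and plain data attributes.
funcs = sorted(
    name for name in dir(torch)
    if callable(getattr(torch, name))
)

print(len(funcs))   # total number of callables found
print(funcs[-6:])   # tail of the list, ending in 'zeros', 'zeros_like'
```

The `callable(...)` check is the main design choice here: it keeps classes like `torch.Tensor` alongside free functions, which may be broader than whatever filter generated the original listing.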