- # @generated from torch/_C/__init__.pyi.in
- import torch
- from torch.package import PackageExporter
- from torch import Tensor, inf
- from torch.autograd.graph import Node as _Node
- from enum import Enum
- from pathlib import Path
- from typing import (
- Any, BinaryIO, Callable, ContextManager, Dict, Iterable, Iterator, List,
- NamedTuple, Optional, overload, Sequence, Tuple, TypeVar, Type, Union,
- Literal, Generic, Set, AnyStr)
- from torch.types import (
- _int, _float, _bool, _dtype, _device, _qscheme, _size, _layout, Device, Number, Storage, SymInt, _dispatchkey
- )
- from torch.storage import TypedStorage
- import builtins
- # This module is defined in torch/csrc/Module.cpp
- from . import _nn as _nn
- from . import _onnx as _onnx
- from . import _VariableFunctions as _VariableFunctions
- from . import _functorch as _functorch
- from . import _lazy as _lazy
- from . import _lazy_ts_backend as _lazy_ts_backend
- T = TypeVar('T')
- S = TypeVar("S", bound="torch.Tensor")
- # Defined in torch/csrc/Device.cpp
- class device:
- type: str # THPDevice_type
- index: _int # THPDevice_index
- def __get__(self, instance, owner=None) -> device: ...
- # THPDevice_pynew
- @overload
- def __init__(self, device: Union[_device, _int, str]) -> None: ...
- @overload
- def __init__(self, type: str, index: _int) -> None: ...
- # Uncomment if we ever make torch.device a decorator
- # def __call__(self, func: T) -> T: ...
- def __enter__(self) -> "device": ...
- def __exit__(self, exc_type, exc_val, exc_tb) -> None: ...
- def __reduce__(self) -> Tuple[Any, ...]: ... # THPDevice_reduce
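- # Illustrative usage of the device bindings above (a sketch, not part of the
- # generated stub; the "cuda" lines assume a CUDA-enabled build):
- import torch
- d = torch.device("cuda", 0)  # type == "cuda", index == 0
- assert d == torch.device("cuda:0")  # equivalent single-string form
- with torch.device("meta"):  # __enter__/__exit__ make device a context manager
-     t = torch.empty(3)  # factory calls inside the block default to "meta"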
- # Defined in torch/csrc/Stream.cpp
- class Stream:
- stream_id: _int # Stream id
- device_index: _int
- device_type: _int
- device: device # The device of the stream
- ...
- # Defined in torch/csrc/Size.cpp
- class Size(Tuple[_int, ...]):
- # TODO: __reduce__
- @overload # type: ignore[override]
- def __getitem__(self: Size, key: _int) -> _int: ...
- @overload
- def __getitem__(self: Size, key: slice) -> Size: ...
- def numel(self: Size) -> _int: ...
- ...
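- # Example of Size behaving as a tuple of ints (illustrative, not part of the stub):
- import torch
- s = torch.zeros(2, 3).shape  # torch.Size([2, 3])
- assert s[0] == 2  # integer indexing returns an int
- assert s[0:1] == torch.Size([2])  # slicing returns another Size
- assert s.numel() == 6  # product of all dimensions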
- # Defined in torch/csrc/Dtype.cpp
- class dtype:
- # TODO: __reduce__
- is_floating_point: _bool
- is_complex: _bool
- is_signed: _bool
- ...
- # Defined in torch/csrc/TypeInfo.cpp
- class iinfo:
- bits: _int
- min: _int
- max: _int
- dtype: str
- def __init__(self, dtype: _dtype) -> None: ...
- class finfo:
- bits: _int
- min: _float
- max: _float
- eps: _float
- tiny: _float
- smallest_normal: _float
- resolution: _float
- dtype: str
- @overload
- def __init__(self, dtype: _dtype) -> None: ...
- @overload
- def __init__(self) -> None: ...
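- # Example queries against iinfo/finfo (illustrative, not part of the stub):
- import torch
- assert torch.iinfo(torch.int8).max == 127  # integer type limits
- assert torch.finfo(torch.float16).eps == 2 ** -10  # smallest eps with 1.0 + eps != 1.0
- f = torch.finfo()  # zero-argument overload: info for the default dtype (float32)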
- float32: dtype = ...
- float: dtype = ...
- float64: dtype = ...
- double: dtype = ...
- float16: dtype = ...
- bfloat16: dtype = ...
- half: dtype = ...
- uint8: dtype = ...
- int8: dtype = ...
- int16: dtype = ...
- short: dtype = ...
- int32: dtype = ...
- int: dtype = ...
- int64: dtype = ...
- long: dtype = ...
- complex32: dtype = ...
- complex64: dtype = ...
- cfloat: dtype = ...
- complex128: dtype = ...
- cdouble: dtype = ...
- quint8: dtype = ...
- qint8: dtype = ...
- qint32: dtype = ...
- bool: dtype = ...
- quint4x2: dtype = ...
- quint2x4: dtype = ...
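- # The dtype flags above are queryable on these module-level constants, e.g.:
- import torch
- assert torch.float16.is_floating_point
- assert torch.complex64.is_complex
- assert not torch.uint8.is_signed
- x = torch.zeros(3, dtype=torch.bfloat16)  # constants select tensor element types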
- # Defined in torch/csrc/Layout.cpp
- class layout:
- ...
- # Defined in torch/csrc/utils/disable_torch_function.cpp
- def DisableTorchFunction(): ...
- def DisableTorchFunctionSubclass(): ...
- # Defined in torch/csrc/utils/tensor_layouts.cpp
- strided: layout = ...
- sparse_coo: layout = ...
- sparse_csr: layout = ...
- sparse_csc: layout = ...
- sparse_bsr: layout = ...
- sparse_bsc: layout = ...
- _mkldnn: layout = ...
- # Defined in torch/csrc/MemoryFormat.cpp
- class memory_format: ...
- # Defined in torch/csrc/utils/tensor_memoryformats.cpp
- contiguous_format: memory_format = ...
- channels_last: memory_format = ...
- channels_last_3d: memory_format = ...
- preserve_format: memory_format = ...
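- # Example of selecting a memory format (illustrative, not part of the stub):
- import torch
- x = torch.randn(1, 3, 8, 8)  # NCHW, contiguous_format by default
- y = x.to(memory_format=torch.channels_last)  # same shape, NHWC strides
- assert y.is_contiguous(memory_format=torch.channels_last)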
- # Defined in torch/csrc/QScheme.cpp
- class qscheme: ...
- # Defined in torch/csrc/utils/tensor_qschemes.h
- per_tensor_affine: qscheme = ...
- per_channel_affine: qscheme = ...
- per_tensor_symmetric: qscheme = ...
- per_channel_symmetric: qscheme = ...
- per_channel_affine_float_qparams: qscheme = ...
- # Defined in torch/csrc/autograd/python_function.cpp
- class _FunctionBase:
- ...
- # Defined in torch/csrc/autograd/python_legacy_variable.cpp
- class _LegacyVariableBase(Tensor): # inherits from Tensor to appease mypy
- def __init__(
- self,
- data: Optional[Tensor] = ...,
- requires_grad: Optional[_bool] = ...,
- volatile: Optional[_bool] = ...,
- _grad_fn: Optional[_FunctionBase] = ...
- ) -> None: ...
- # Defined in torch/csrc/jit/python/init.cpp
- class IODescriptor: ...
- class JITException: ...
- class Future:
- def __init__(self, devices: List[device]) -> None: ...
- def done(self) -> _bool: ...
- def value(self) -> Any: ...
- def wait(self) -> Any: ...
- def add_done_callback(self, callback: Callable) -> None: ...
- def then(self, callback: Callable) -> Future: ...
- def set_result(self, result: Any) -> None: ...
- def _set_unwrap_func(self, callback: Callable) -> None: ...
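- # These Future bindings back torch.jit.fork / torch.jit.wait; a minimal sketch:
- import torch
- def double(x: torch.Tensor) -> torch.Tensor:
-     return x * 2
- fut = torch.jit.fork(double, torch.ones(2))  # schedules the call, returns a Future
- out = torch.jit.wait(fut)  # blocks until the result is ready
- assert torch.equal(out, torch.full((2,), 2.0))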
- class _Await:
- def __init__(self) -> None: ...
- def fn(self) -> Callable: ...
- def args(self) -> Tuple[Any, ...]: ...
- def is_nowait(self) -> _bool: ...
- def _jit_set_num_profiled_runs(num: _size) -> _size: ...
- # Defined in torch/csrc/jit/passes/mobile_optimizer_type.h
- class _MobileOptimizerType:
- ...
- CONV_BN_FUSION: _MobileOptimizerType
- INSERT_FOLD_PREPACK_OPS: _MobileOptimizerType
- REMOVE_DROPOUT: _MobileOptimizerType
- FUSE_ADD_RELU: _MobileOptimizerType
- HOIST_CONV_PACKED_PARAMS: _MobileOptimizerType
- VULKAN_AUTOMATIC_GPU_TRANSFER: _MobileOptimizerType
- def fork(*args: Any, **kwargs: Any) -> Future: ...
- def wait(fut: Future) -> Any: ...
- def _awaitable(*args: Any, **kwargs: Any) -> _Await: ...
- def _awaitable_wait(aw: _Await) -> Any: ...
- def _awaitable_nowait(x: Any) -> _Await: ...
- def _collect_all(futures: List[Future]) -> Future: ...
- def _set_print_stack_traces_on_fatal_signal(print: _bool) -> None: ...
- def unify_type_list(types: List[JitType]) -> JitType: ...
- def _freeze_module(module: ScriptModule,
- preserved_attrs: List[str] = [],
- freeze_interfaces: _bool = True,
- preserveParameters: _bool = True) -> ScriptModule: ...
- def _jit_pass_optimize_frozen_graph(Graph, optimize_numerics: _bool = True) -> None: ...
- def _jit_pass_optimize_for_inference(module: 'torch.jit.ScriptModule',
- other_methods: List[str] = []) -> None: ...
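- # The freezing entry points above back torch.jit.freeze and
- # torch.jit.optimize_for_inference; a minimal sketch:
- import torch
- class M(torch.nn.Module):
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         return torch.relu(x)
- m = torch.jit.script(M()).eval()  # freezing requires eval mode
- frozen = torch.jit.freeze(m)  # inlines parameters/attributes via _freeze_module
- opt = torch.jit.optimize_for_inference(frozen)  # runs the frozen-graph passes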
- def _jit_pass_fold_frozen_conv_bn(graph: Graph): ...
- def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ...
- def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ...
- def _jit_pass_fuse_frozen_conv_add_relu(graph: Graph): ...
- def _jit_pass_concat_frozen_linear(graph: Graph): ...
- def _jit_pass_convert_frozen_ops_to_mkldnn(graph: Graph): ...
- def _jit_pass_transpose_frozen_linear(graph: Graph): ...
- def _jit_pass_remove_dropout(module: 'torch.jit.ScriptModule'): ...
- def _is_tracing() -> _bool: ...
- def _jit_init() -> _bool: ...
- def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ...
- def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ...
- def _jit_get_operation(op_name: str) -> Tuple[Callable, List[str]]: ...
- def _get_operation_overload(op_name: str, op_overload_name: str) -> Tuple[Callable, Callable, List[Any]]: ...
- def _get_schema(op_name: str, overload_name: str) -> FunctionSchema: ...
- def _jit_pass_optimize_for_mobile(module: 'torch.jit.ScriptModule',
- optimization_blocklist: Set[_MobileOptimizerType],
- preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
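- # _jit_pass_optimize_for_mobile backs the public helper in
- # torch.utils.mobile_optimizer; a minimal sketch:
- import torch
- from torch.utils.mobile_optimizer import optimize_for_mobile
- scripted = torch.jit.script(torch.nn.Linear(4, 2).eval())
- mobile_ready = optimize_for_mobile(scripted)  # applies the blocklist-driven passes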
- def _clone_module_with_class(module: 'torch.jit.ScriptModule',
- ignored_methods: List[AnyStr],
- ignored_attributes: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
- def _jit_pass_vulkan_optimize_for_mobile(module: 'torch.jit.ScriptModule',
- optimization_blocklist: Set[_MobileOptimizerType],
- preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
- def _jit_pass_metal_optimize_for_mobile(module: 'torch.jit.ScriptModule',
- preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
- def _jit_pass_inline(Graph) -> None: ...
- def _jit_pass_constant_propagation(Graph) -> None: ...
- def _jit_pass_propagate_shapes_on_graph(Graph) -> None: ...
- def _jit_register_decomposition_for_schema(schema: FunctionSchema, Graph) -> None: ...
- def _jit_erase_non_input_shape_information(Graph) -> None: ...
- def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ...
- def _jit_get_all_schemas() -> List[FunctionSchema]: ...
- def _jit_check_alias_annotation(g: Graph, args: Tuple[Any, ...], unqualified_op_name: str): ...
- def _jit_can_fuse_on_cpu() -> _bool: ...
- def _jit_can_fuse_on_gpu() -> _bool: ...
- def _jit_can_fuse_on_cpu_legacy() -> _bool: ...
- def _debug_get_fusion_group_inlining() -> _bool: ...
- def _debug_set_fusion_group_inlining(enable: _bool): ...
- def _jit_texpr_fuser_enabled() -> _bool: ...
- def _jit_nvfuser_enabled() -> _bool: ...
- def _jit_llga_enabled() -> _bool: ...
- def _jit_set_llga_enabled(enable: _bool): ...
- def _llvm_enabled() -> _bool: ...
- def _jit_override_can_fuse_on_cpu(override: _bool): ...
- def _jit_override_can_fuse_on_gpu(override: _bool): ...
- def _jit_override_can_fuse_on_cpu_legacy(override: _bool): ...
- def _jit_set_symbolic_shapes_test_mode(override: _bool): ...
- def _jit_symbolic_shapes_test_mode_enabled() -> _bool: ...
- def _jit_set_texpr_fuser_enabled(enable: _bool): ...
- def _jit_set_te_must_use_llvm_cpu(use_llvm: _bool): ...
- def _jit_set_nvfuser_enabled(enable: _bool) -> _bool: ...
- def _jit_cat_wo_conditionals(optimize_cat: _bool): ...
- def _jit_opt_conditionals(opt_conds: _bool): ...
- def _jit_pass_canonicalize(graph: Graph, keep_unique_names: _bool = True): ...
- def _jit_pass_erase_shape_information(graph: Graph): ...
- def _jit_pass_fold_convbn(module: 'torch.jit.ScriptModule'): ...
- def _jit_pass_insert_observers(module: 'torch.jit.ScriptModule',
- method_name: str,
- qconfig_dict: Dict[str, Any],
- inplace: _bool,
- quant_type: _int): ...
- def _jit_pass_insert_quant_dequant(module: 'torch.jit.ScriptModule',
- method_name: str,
- inplace: _bool,
- debug: _bool,
- quant_type: _int): ...
- def _jit_pass_insert_quant_dequant_for_ondevice_ptq(module: 'torch.jit.ScriptModule',
- method_name: str,
- inplace: _bool,
- debug: _bool,
- quant_type: _int): ...
- def _jit_pass_quant_finalize(module: 'torch.jit.ScriptModule',
- quant_type: _int,
- preserved_attrs: Sequence[str]): ...
- def _jit_pass_quant_finalize_for_ondevice_ptq(module: 'torch.jit.ScriptModule',
- quant_type: _int,
- method_name: str): ...
- def _jit_pass_insert_observer_method_for_ondevice_ptq(module: 'torch.jit.ScriptModule',
- method_name: str,
- qconfig_dict: Dict[str, Any],
- inplace: _bool,
- quant_type: _int): ...
- def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ...
- def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ...
- def _jit_set_fusion_strategy(strategy: List[Tuple[str, _int]]) -> List[Tuple[str, _int]]: ...
- def _jit_try_infer_type(obj: Any) -> InferredType: ...
- def _jit_get_trigger_value(trigger_name: str) -> _int: ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- ResolutionCallback = Callable[[str], Callable[..., Any]]
- # Defined in torch/csrc/jit/python/script_init.cpp
- # and torch/csrc/jit/python/init.cpp
- def _create_function_from_graph(qualname: str, graph: Graph) -> ScriptFunction: ...
- def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ...
- def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ...
- def _jit_assert_is_instance(obj: Any, type: JitType): ...
- def _jit_clear_class_registry() -> None: ...
- def _jit_set_emit_hooks(ModuleHook: Optional[Callable], FunctionHook: Optional[Callable]) -> None: ...
- def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ...
- def _load_for_lite_interpreter(filename: Union[str, Path], map_location: Union[_device, str, None]): ...
- def _load_for_lite_interpreter_from_buffer(buffer: BinaryIO, map_location: Union[_device, str, None]): ...
- def _export_operator_list(module: LiteScriptModule): ...
- def _quantize_ondevice_ptq_dynamic(module: LiteScriptModule, method_name: str): ...
- def _get_model_bytecode_version(filename: Union[str, Path]) -> _int: ...
- def _get_model_bytecode_version_from_buffer(buffer: BinaryIO) -> _int: ...
- def _backport_for_mobile(filename_input: Union[str, Path], filename_output: Union[str, Path], to_version: _int) -> None: ...
- def _backport_for_mobile_from_buffer(buffer: BinaryIO, filename_output: Union[str, Path], to_version: _int) -> None: ...
- def _backport_for_mobile_to_buffer(filename_input: Union[str, Path], to_version: _int) -> bytes: ...
- def _backport_for_mobile_from_buffer_to_buffer(buffer: BinaryIO, to_version: _int) -> bytes: ...
- def _get_model_ops_and_info(filename: Union[str, Path]): ...
- def _get_model_ops_and_info_from_buffer(buffer: BinaryIO): ...
- def _get_mobile_model_contained_types(filename: Union[str, Path]): ...
- def _get_mobile_model_contained_types_from_buffer(buffer: BinaryIO): ...
- def _logging_set_logger(logger: LoggerBase) -> LoggerBase: ...
- def _get_graph_executor_optimize(optimize: Optional[_bool] = None) -> _bool: ...
- def _set_graph_executor_optimize(optimize: _bool): ...
- def _export_opnames(module: ScriptModule) -> List[str]: ...
- def _create_function_from_trace(
- qualname: str,
- func: Callable[..., Any],
- input_tuple: Tuple[Any, ...],
- var_lookup_fn: Callable[[Tensor], str],
- strict: _bool,
- force_outplace: _bool,
- argument_names: List[str]
- ) -> Tuple[Graph, Stack]: ...
- def _create_function_from_trace_with_dict(
- qualname: str,
- func: Callable[..., Any],
- input_dict: Dict[str, Any],
- var_lookup_fn: Callable[[Tensor], str],
- strict: _bool,
- force_outplace: _bool,
- argument_names: List[str]
- ) -> Tuple[Graph, Stack]: ...
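- # These two entry points perform the graph capture behind torch.jit.trace;
- # an illustrative sketch of the public wrapper:
- import torch
- def f(x: torch.Tensor) -> torch.Tensor:
-     return x.sin() + 1
- traced = torch.jit.trace(f, torch.randn(3))  # records a Graph from one example input
- print(traced.graph)  # the captured IR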
- def _jit_is_script_object(obj: Any) -> _bool: ...
- def _last_executed_optimized_graph() -> Graph: ...
- def parse_type_comment(comment: str) -> Decl: ...
- def _get_upgraders_map_size() -> _int: ...
- def _dump_upgraders_map() -> Dict[str, str]: ...
- def _test_only_populate_upgraders(content: Dict[str, str]) -> None: ...
- def _test_only_remove_upgraders(content: Dict[str, str]) -> None: ...
- def merge_type_from_type_comment(decl: Decl, type_annotation_decl: Decl, is_method: _bool) -> Decl: ...
- def parse_ir(input: str, parse_tensor_constants: _bool) -> Graph: ...
- def parse_schema(schema: str) -> FunctionSchema: ...
- def get_device(input: Tensor) -> _int: ...
- def _resolve_type_from_object(obj: Any, range: SourceRange, rcb: ResolutionCallback) -> JitType: ...
- def _create_module_with_type(ty: JitType) -> ScriptModule: ...
- def _create_object_with_type(ty: ClassType) -> ScriptObject: ...
- def _run_emit_module_hook(m: ScriptModule): ...
- def _replace_overloaded_method_decl(overload_decl: Decl, implementation_def: Def, new_name: str) -> Def: ...
- def _jit_pass_lower_all_tuples(graph: Graph) -> None: ...
- def _jit_pass_onnx_set_dynamic_input_shape(graph: Graph, dynamic_axes: Dict[str, Dict[_int, str]], input_names: List[str]) -> None: ...
- def _jit_pass_onnx_graph_shape_type_inference(graph: Graph, params_dict: Dict[str, IValue], opset_version: _int) -> None: ...
- def _jit_pass_onnx_assign_output_shape(graph: Graph, tensors: List[Tensor], desc: IODescriptor, onnx_shape_inference: _bool, is_script: _bool, opset_version: _int) -> None: ...
- def _jit_pass_onnx_remove_inplace_ops_for_onnx(graph: Graph, module: Optional[ScriptModule] = None) -> None: ...
- def _jit_pass_remove_inplace_ops(graph: Graph) -> None: ...
- def _jit_pass_canonicalize_graph_fuser_ops(graph: Graph) -> None: ...
- def _jit_pass_peephole(graph: Graph, disable_shape_peepholes: _bool = False) -> None: ...
- def _jit_pass_onnx_autograd_function_process(graph: Graph) -> None: ...
- def _jit_pass_fuse_addmm(graph: Graph) -> None: ...
- def _jit_pass_onnx_preprocess(graph: Graph) -> None: ...
- def _jit_pass_prepare_division_for_onnx(graph: Graph) -> None: ...
- def _jit_pass_onnx_remove_print(graph: Graph) -> None: ...
- def _jit_pass_onnx_preprocess_caffe2(graph: Graph) -> None: ...
- def _jit_pass_onnx_unpack_quantized_weights(
- graph: Graph,
- paramsDict: Dict[str, IValue],
- caffe2: _bool
- ) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_quantization_insert_permutes(
- graph: Graph,
- paramsDict: Dict[str, IValue]
- ) -> Dict[str, IValue]: ...
- def _jit_pass_custom_pattern_based_rewrite_graph(pattern: str, fused_node_name: str, graph: Graph) -> None: ...
- def _jit_onnx_list_model_parameters(module: ScriptModule) -> Tuple[ScriptModule, List[IValue]]: ...
- def _jit_pass_erase_number_types(graph: Graph) -> None: ...
- def _jit_pass_onnx_lint(graph: Graph) -> None: ...
- def _jit_pass_onnx(graph: Graph, _jit_pass_onnx: _onnx.OperatorExportTypes) -> Graph: ...
- def _jit_pass_onnx_scalar_type_analysis(graph: Graph, lowprecision_cast: _bool, opset_version: _int) -> None: ...
- def _jit_pass_onnx_peephole(graph: Graph, opset_version: _int, fixed_batch_size: _bool) -> None: ...
- def _jit_pass_dce_allow_deleting_nodes_with_side_effects(graph: Graph) -> None: ...
- def _jit_pass_onnx_function_substitution(graph: Graph) -> None: ...
- def _jit_pass_onnx_function_extraction(graph: Graph, module_names: Set[str], param_names: List[str]) -> Dict[Node, Dict[str, str]]: ...
- def _jit_pass_onnx_clear_scope_records() -> None: ...
- def _jit_pass_onnx_track_scope_attributes(graph: Graph, onnx_attrs: Dict[str, Any]) -> None: ...
- def _jit_is_onnx_log_enabled() -> _bool: ...
- def _jit_set_onnx_log_enabled(enabled: _bool) -> None: ...
- def _jit_set_onnx_log_output_stream(stream_name: str) -> None: ...
- def _jit_onnx_log(*args: Any) -> None: ...
- def _jit_pass_lower_graph(graph: Graph, m: Module) -> Tuple[Graph, List[IValue]]: ...
- def _jit_pass_inline_fork_wait(graph: Graph) -> None: ...
- def _jit_pass_onnx_deduplicate_initializers(graph: Graph, params_dict: Dict[str, IValue], is_train: _bool) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_eval_peephole(graph: Graph, paramsDict: Dict[str, IValue]) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_constant_fold(graph: Graph, paramsDict: Dict[str, IValue], opset_version: _int) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_eliminate_unused_items(graph: Graph, paramsDict: Dict[str, IValue]) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_cast_all_constant_to_floating(graph: Graph) -> None: ...
- def _jit_pass_filter_non_tensor_arguments(params: Dict[str, IValue]) -> Dict[str, Tensor]: ...
- def _jit_decay_packed_param_input_types(graph: Graph) -> None: ...
- def _jit_pass_onnx_node_shape_type_inference(n: Node, paramsDict: Dict[str, IValue], opset_version: _int) -> None: ...
- def _jit_onnx_convert_pattern_from_subblock(block: Block, n: Node, env: Dict[Value, Value]) -> List[Value]: ...
- def _jit_pass_onnx_block(
- old_block: Block,
- new_block: Block,
- operator_export_type: _onnx.OperatorExportTypes,
- env: Dict[Value, Value],
- is_sub_block: _bool
- ) -> Dict[Value, Value]: ...
- def _jit_pass_onnx_assign_scoped_names_for_node_and_value(graph: Graph) -> None: ...
- def _jit_pass_fixup_onnx_controlflow_node(n: Node, opset_version: _int) -> List[Value]: ...
- def _jit_onnx_create_full_scope_name(class_name: str, variable_name: str) -> str: ...
- def _compile_graph_to_code_table(name: str, graph: Graph) -> IValue: ...
- def _generate_upgraders_graph() -> Dict[str, Graph]: ...
- def _calculate_package_version_based_on_upgraders(val: _bool): ...
- def _get_version_calculator_flag() -> _bool: ...
- def _jit_script_interface_compile(name: str, class_def: ClassDef, rcb: ResolutionCallback, is_module: _bool): ...
- def _jit_script_compile_overload(
- qualname: str,
- overload_decl: Decl,
- implementation_def: Def,
- rcb: ResolutionCallback,
- implementation_defaults: Dict[str, Any],
- signature: Any
- ): ...
- def _jit_script_compile(
- qual_name: str,
- definition: Def,
- rcb: ResolutionCallback,
- defaults: Dict[str, Any]
- ): ...
- def _jit_script_class_compile(
- qual_name: str,
- definition: ClassDef,
- defaults: Dict[str, Dict[str, Any]],
- rcb: ResolutionCallback
- ): ...
- def _parse_source_def(src: str) -> Def: ...
- def import_ir_module(
- cu: CompilationUnit,
- filename: Union[str, Path],
- map_location: Union[_device, str, None],
- extra_files: Dict[str, Any]
- ) -> ScriptModule: ...
- def import_ir_module_from_buffer(
- cu: CompilationUnit,
- buffer: BinaryIO,
- map_location: Union[_device, str, None],
- extra_files: Dict[str, Any]
- ) -> ScriptModule: ...
- def _import_ir_module_from_package(
- cu: CompilationUnit,
- reader: PyTorchFileReader,
- storage_context: DeserializationStorageContext,
- map_location: Union[_device, str, None],
- ts_id: str
- ) -> ScriptModule: ...
- def _assign_output_shapes(graph: Graph, inputs: List[Tensor]) -> Graph: ...
- def _check_onnx_proto(proto: str) -> None: ...
- def _propagate_and_assign_input_shapes(
- graph: Graph,
- inputs: Tuple[Tensor, ...],
- param_count_list: List[_int],
- with_grad: _bool,
- propagate: _bool
- ) -> Graph: ...
- # Defined in torch/csrc/jit/runtime/graph_executor.h
- class GraphExecutorState:
- ...
- # Defined in torch/csrc/jit/ir/alias_analysis.h
- class AliasDb:
- def __str__(self) -> str: ...
- ...
- class _InsertPoint:
- def __enter__(self) -> None: ...
- def __exit__(self, *args) -> None: ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Use:
- @property
- def user(self) -> Node: ...
- @property
- def offset(self) -> _int: ...
- def isAfter(self, other: Use) -> _bool: ...
- ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Value:
- def type(self) -> JitType: ...
- def setType(self, t: JitType) -> Value: ...
- def setTypeAs(self, other: Value) -> Value: ...
- def inferTypeFrom(self, t: Tensor) -> None: ...
- def debugName(self) -> str: ...
- def setDebugName(self, name: str) -> None: ...
- def unique(self) -> _int: ...
- def offset(self) -> _int: ...
- def node(self) -> Node: ...
- def uses(self) -> List[Use]: ...
- def replaceAllUsesWith(self, val: Value) -> None: ...
- def replaceAllUsesAfterNodeWith(self, node: Node, val: Value) -> None: ...
- def requires_grad(self) -> _bool: ...
- def requiresGrad(self) -> _bool: ...
- def copyMetadata(self, other: Value) -> Value: ...
- def isCompleteTensor(self) -> _bool: ...
- def toIValue(self) -> IValue: ...
- ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Block:
- def inputs(self) -> Iterator[Value]: ...
- def outputs(self) -> Iterator[Value]: ...
- def nodes(self) -> Iterator[Node]: ...
- def paramNode(self) -> Node: ...
- def returnNode(self) -> Node: ...
- def owningNode(self) -> Node: ...
- def registerOutput(self, n: Value) -> _int: ...
- def addNode(self, name: str, inputs: Sequence[Value]) -> Node: ...
- ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Node:
- def __getitem__(self, key: str) -> Any: ...
- def schema(self) -> str: ...
- def input(self) -> Value: ...
- def inputs(self) -> Iterator[Value]: ...
- def inputsAt(self, idx: _int) -> Value: ...
- def inputsSize(self) -> _int: ...
- def output(self) -> Value: ...
- def outputs(self) -> Iterator[Value]: ...
- def outputsAt(self, idx: _int) -> Value: ...
- def outputsSize(self) -> _int: ...
- def hasMultipleOutputs(self) -> _bool: ...
- def blocks(self) -> List[Block]: ...
- def addBlock(self) -> Block: ...
- def mustBeNone(self) -> _bool: ...
- def matches(self, pattern: str) -> _bool: ...
- def kind(self) -> str: ...
- def kindOf(self, name: str) -> str: ...
- def addInput(self, name: str) -> Value: ...
- def replaceInput(self, i: _int, newValue: Value) -> Value: ...
- def replaceInputWith(self, from_: Value, to: Value) -> None: ...
- def replaceAllUsesWith(self, n: Node) -> None: ...
- def insertBefore(self, n: Node) -> Node: ...
- def insertAfter(self, n: Node) -> Node: ...
- def isBefore(self, n: Node) -> _bool: ...
- def isAfter(self, n: Node) -> _bool: ...
- def moveBefore(self, n: Node) -> None: ...
- def moveAfter(self, n: Node) -> None: ...
- def removeInput(self, i: _int) -> None: ...
- def removeAllInputs(self) -> None: ...
- def hasUses(self) -> _bool: ...
- def eraseOutput(self, i: _int) -> None: ...
- def addOutput(self) -> Value: ...
- def scopeName(self) -> str: ...
- def isNondeterministic(self) -> _bool: ...
- def copyAttributes(self, rhs: Node) -> Node: ...
- def copyMetadata(self, rhs: Node) -> Node: ...
- def hasAttributes(self) -> _bool: ...
- def hasAttribute(self, name: str) -> _bool: ...
- def removeAttribute(self, attr: str) -> Node: ...
- def namedInput(self, name: str) -> Value: ...
- def sourceRange(self) -> SourceRange: ...
- def owningBlock(self) -> Block: ...
- def findNode(self, kind: str, recurse: _bool = True) -> Node: ...
- def findAllNodes(self, kind: str, recurse: _bool = True) -> List[Node]: ...
- def getModuleHierarchy(self) -> str: ...
- def prev(self) -> Node: ...
- def destroy(self) -> None: ...
- def attributeNames(self) -> List[str]: ...
- # Accessors for attributes as types.
- def f(self, name: str) -> _float: ...
- def f_(self, name: str, val: _float) -> Node: ...
- def fs(self, name: str) -> List[_float]: ...
- def fs_(self, name: str, val: List[_float]) -> Node: ...
- def c(self, name: str) -> complex: ...
- def c_(self, name: str, val: complex) -> Node: ...
- def s(self, name: str) -> str: ...
- def s_(self, name: str, val: str) -> Node: ...
- def ss(self, name: str) -> List[str]: ...
- def ss_(self, name: str, val: List[str]) -> Node: ...
- def i(self, name: str) -> _int: ...
- def i_(self, name: str, val: _int) -> Node: ...
- # Cannot define "is" like this because it's a reserved keyword in Python.
- # def is(self, name: str) -> List[_int]: ...
- # def is_(self, name: str, val: List[_int]) -> Node: ...
- def g(self, name: str) -> Graph: ...
- def g_(self, name: str, val: Graph) -> Node: ...
- def gs(self, name: str) -> List[Graph]: ...
- def gs_(self, name: str, val: List[Graph]) -> Node: ...
- def ival(self, name: str) -> IValue: ...
- def ival_(self, name: str, val: IValue) -> Node: ...
- def t(self, name: str) -> Tensor: ...
- def t_(self, name: str, val: Tensor) -> Node: ...
- def ts(self, name: str) -> List[Tensor]: ...
- def ts_(self, name: str, val: List[Tensor]) -> Node: ...
- def ty(self, name: str) -> JitType: ...
- def ty_(self, name: str, val: JitType) -> Node: ...
- def tys(self, name: str) -> List[JitType]: ...
- def tys_(self, name: str, val: List[JitType]) -> Node: ...
- ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Graph:
- def inputs(self) -> Iterator[Value]: ...
- def outputs(self) -> Iterator[Value]: ...
- def nodes(self) -> Iterator[Node]: ...
- def param_node(self) -> Node: ...
- def return_node(self) -> Node: ...
- def addInput(self, name: str = "") -> Value: ...
- def eraseInput(self, i: _int) -> None: ...
- def registerOutput(self, n: Value) -> _int: ...
- def eraseOutput(self, i: _int) -> None: ...
- def create(self, name: str, args, num_outputs: _int) -> Node: ...
- def appendNode(self, n: Node) -> Node: ...
- def prependNode(self, n: Node) -> Node: ...
- def insertNode(self, n: Node) -> Node: ...
- def block(self) -> Block: ...
- def lint(self) -> None: ...
- def alias_db(self) -> AliasDb: ...
- def setInsertPoint(self, n: Union[Block, Node]) -> None: ...
- def insert_point_guard(self, n: Union[Block, Node]) -> _InsertPoint: ...
- def insertPoint(self) -> Node: ...
- def insertGraph(self, callee: Graph, inputs: List[Value]) -> List[Value]: ...
- def makeMultiOutputIntoTuple(self) -> None: ...
- def copy(self) -> Graph: ...
- ...
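- # Example of walking a Graph with the Value/Node/Block API above
- # (illustrative, not part of the stub):
- import torch
- @torch.jit.script
- def f(x: torch.Tensor) -> torch.Tensor:
-     return x + 1
- g = f.graph  # a torch._C.Graph
- for node in g.nodes():
-     print(node.kind())  # e.g. "prim::Constant", "aten::add"
- assert all(isinstance(v.debugName(), str) for v in g.inputs())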
- # Defined in aten/src/ATen/core/alias_info.h
- class AliasInfo:
- is_write: _bool
- before_set: Set[str]
- after_set: Set[str]
- # Defined in aten/src/ATen/core/function_schema.h
- class Argument:
- name: str
- type: JitType
- default_value: Optional[Any]
- kwarg_only: _bool
- is_out: _bool
- alias_info: Optional[AliasInfo]
- def has_default_value(self) -> _bool: ...
- ...
- class FunctionSchema:
- arguments: List[Argument]
- returns: List[Argument]
- name: str
- overload_name: str
- ...
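- # parse_schema (declared above) produces these FunctionSchema objects, e.g.:
- import torch
- schema = torch._C.parse_schema(
-     "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor")
- assert schema.name == "aten::add" and schema.overload_name == "Tensor"
- assert [a.name for a in schema.arguments] == ["self", "other", "alpha"]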
- class _UpgraderEntry:
- bumped_at_version: _int
- upgrader_name: str
- old_schema: str
- def __init__(self, bumped_at_version: _int, upgrader_name: str, old_schema: str) -> None: ...
- class _UpgraderRange:
- min_version: _int
- max_version: _int
- def _get_max_operator_version() -> _int: ...
- def _get_operator_version_map() -> Dict[str, List[_UpgraderEntry]]: ...
- def _get_upgrader_ranges(name: str) -> List[_UpgraderRange]: ...
- def _test_only_add_entry_to_op_version(op_name: str, entry: _UpgraderEntry) -> None: ...
- def _test_only_remove_entry_to_op_version(op_name: str) -> None: ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class ScriptModuleSerializer:
- def __init__(self, export_writer: PyTorchFileWriter) -> None: ...
- def serialize(self, model: ScriptModule, script_module_id: _int) -> None: ...
- def write_files(self) -> None: ...
- def storage_context(self) -> SerializationStorageContext: ...
- ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class SerializationStorageContext:
- def __init__(self) -> None: ...
- def has_storage(self, storage: Storage) -> _bool: ...
- def get_or_add_storage(self, storage: Storage) -> _int: ...
- ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class DeserializationStorageContext:
- def __init__(self) -> None: ...
- def get_storage(self, name: str, dtype: _dtype) -> Tensor: ...
- def has_storage(self, name: str) -> _bool: ...
- def add_storage(self, name: str, tensor: Tensor) -> _int: ...
- ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class ConcreteModuleTypeBuilder:
- def __init__(self, obj: Any) -> None: ...
- def set_module_dict(self): ...
- def set_module_list(self): ...
- def set_parameter_list(self): ...
- def set_parameter_dict(self): ...
- def add_attribute(self, name: str, ty: JitType, is_param: _bool, is_buffer: _bool): ...
- def add_module(self, name: str, meta: ConcreteModuleType): ...
- def add_constant(self, name: str, value: Any): ...
- def add_overload(self, method_name: str, overloaded_method_names: List[str]): ...
- def add_builtin_function(self, name: str, symbol_name: str): ...
- def add_failed_attribute(self, name: str, failure_reason: str): ...
- def add_function_attribute(self, name: str, ty: JitType, func: Callable[..., Any]): ...
- def add_ignored_attribute(self, name: str): ...
- def add_ignored_attributes(self, names: List[str]): ...
- def add_forward_hook(self, hook: Callable[..., Any]): ...
- def add_forward_pre_hook(self, pre_hook: Callable[..., Any]): ...
- class ConcreteModuleType:
- def get_constants(self) -> Dict[str, Any]: ...
- def equals(self, other: 'ConcreteModuleType') -> _bool: ...
- @staticmethod
- def from_jit_type(ty: JitType) -> ConcreteModuleType: ...
- class CallStack:
- def __init__(self, name: str, range: SourceRange): ...
- class ErrorReport:
- def __init__(self, range: SourceRange) -> None: ...
- def what(self) -> str: ...
- @staticmethod
- def call_stack() -> str: ...
- class CompilationUnit:
- def __init__(self, lang: str = ..., _frames_up: _int = ...) -> None: ...
- def find_function(self, name: str) -> ScriptFunction: ...
- def __getattr__(self, name: str) -> ScriptFunction: ...
- def define(self, script: str, rcb: ResolutionCallback = ..., _frames_up: _int = ...): ...
- def get_interface(self, name: str) -> InterfaceType: ...
- def get_functions(self) -> List[ScriptFunction]: ...
- def create_function(self, name: str, graph: Graph, shouldMangle: _bool = ...) -> ScriptFunction: ...
- def get_class(self, name: str) -> ClassType: ...
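- # CompilationUnit is exposed publicly as torch.jit.CompilationUnit; a sketch:
- import torch
- cu = torch.jit.CompilationUnit("""
- def add_one(x):
-     return x + 1
- """)
- out = cu.add_one(torch.zeros(2))  # resolved through __getattr__/find_function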
- class ScriptObject:
- def setattr(self, name: str, value: Any): ...
- class ScriptModule(ScriptObject):
- def _method_names(self) -> List[str]: ...
- def _get_method(self, name: str) -> ScriptMethod: ...
- class LiteScriptModule:
- def __call__(self, *input): ...
- def find_method(self, method_name: str): ...
- def forward(self, *input) -> List[str]: ...
- def run_method(self, method_name: str, *input): ...
- class ScriptFunction:
- def __call__(self, *args, **kwargs) -> Tensor: ...
- def save(self, filename: str, _extra_files: Dict[str, bytes]) -> None: ...
- def save_to_buffer(self, _extra_files: Dict[str, bytes]) -> bytes: ...
- @property
- def graph(self) -> Graph: ...
- def inlined_graph(self) -> Graph: ...
- def schema(self) -> FunctionSchema: ...
- def code(self) -> str: ...
- def name(self) -> str: ...
- @property
- def qualified_name(self) -> str: ...
- class ScriptMethod:
- graph: Graph
- @property
- def owner(self) -> ScriptModule: ...
- @property
- def name(self) -> str: ...
- class ModuleDict:
- def __init__(self, mod: ScriptModule) -> None: ...
- def items(self) -> List[Tuple[str, Any]]: ...
- class ParameterDict:
- def __init__(self, mod: ScriptModule) -> None: ...
- class BufferDict:
- def __init__(self, mod: ScriptModule) -> None: ...
- # Defined in torch/csrc/jit/api/module.h
- class Module:
- ...
- # Defined in torch/csrc/Module.cpp
- def _initExtension(shm_manager_path: str) -> None: ... # THPModule_initExtension
- def _autograd_init() -> _bool: ... # THPAutograd_initExtension
- def _add_docstr(obj: T, doc_obj: str) -> T: ... # THPModule_addDocStr
- def _init_names(arg: Sequence[Type]) -> None: ... # THPModule_initNames
- def _has_distributed() -> _bool: ... # THPModule_hasDistributed
- def _set_default_tensor_type(type) -> None: ... # THPModule_setDefaultTensorType
- def _set_default_dtype(d: _dtype) -> None: ... # THPModule_setDefaultDtype
- def _infer_size(arg1: Size, arg2: Size) -> Size: ... # THPModule_inferSize
- def _crash_if_csrc_asan() -> _int: ... # THPModule_crashIfCsrcASAN
- def _crash_if_csrc_ubsan() -> _int: ... # THPModule_crashIfCsrcUBSAN
- def _crash_if_aten_asan() -> _int: ... # THPModule_crashIfATenASAN
- def _show_config() -> str: ... # THPModule_showConfig
- def _cxx_flags() -> str: ... # THPModule_cxxFlags
- def _parallel_info() -> str: ... # THPModule_parallelInfo
- def _set_backcompat_broadcast_warn(arg: _bool) -> None: ... # THPModule_setBackcompatBroadcastWarn
- def _get_backcompat_broadcast_warn() -> _bool: ... # THPModule_getBackcompatBroadcastWarn
- def _set_backcompat_keepdim_warn(arg: _bool) -> None: ... # THPModule_setBackcompatKeepdimWarn
- def _get_backcompat_keepdim_warn() -> _bool: ... # THPModule_getBackcompatKeepdimWarn
- def get_num_threads() -> _int: ... # THPModule_getNumThreads
- def set_num_threads(nthreads: _int) -> None: ... # THPModule_setNumThreads
- def get_num_interop_threads() -> _int: ... # THPModule_getNumInteropThreads
- def set_num_interop_threads(nthreads: _int) -> None: ... # THPModule_setNumInteropThreads
- def _get_cudnn_enabled() -> _bool: ... # THPModule_userEnabledCuDNN
- def _set_cudnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledCuDNN
- def _get_flash_sdp_enabled() -> _bool: ... # THPModule_userEnabledFusedSDP
- def _set_sdp_use_flash(arg: _bool) -> None: ... # THPModule_setSDPUseFlash
- def _get_mem_efficient_sdp_enabled() -> _bool: ... # THPModule_userEnabledMemEfficientSDP
- def _set_sdp_use_mem_efficient(arg: _bool) -> None: ... # THPModule_setSDPUseMemEfficient
- def _get_math_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP
- def _set_sdp_use_math(arg: _bool) -> None: ... # THPModule_setSDPUseMath
- def _get_mkldnn_enabled() -> _bool: ... # THPModule_userEnabledMkldnn
- def _set_mkldnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledMkldnn
- def _get_cudnn_benchmark() -> _bool: ... # THPModule_benchmarkCuDNN
- def _set_cudnn_benchmark(arg: _bool) -> None: ... # THPModule_setBenchmarkCuDNN
- def _get_cudnn_deterministic() -> _bool: ... # THPModule_deterministicCuDNN
- def _set_cudnn_deterministic(arg: _bool) -> None: ... # THPModule_setDeterministicCuDNN
- def _get_deterministic_algorithms() -> _bool: ... # THPModule_deterministicAlgorithms
- def _get_deterministic_algorithms_warn_only() -> _bool: ... # THPModule_deterministicAlgorithmsWarnOnly
- def _set_deterministic_algorithms(mode: _bool, *, warn_only: _bool=...) -> None: ... # THPModule_setDeterministicAlgorithms
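- # These bindings back torch.use_deterministic_algorithms; a minimal sketch:
- import torch
- torch.use_deterministic_algorithms(True, warn_only=True)  # warn instead of raising
- assert torch.are_deterministic_algorithms_enabled()
- torch.use_deterministic_algorithms(False)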
- def _get_warnAlways() -> _bool: ... # THPModule_warnAlways
- def _set_warnAlways(arg: _bool) -> None: ... # THPModule_setWarnAlways
- def _get_cudnn_allow_tf32() -> _bool: ... # THPModule_allowTF32CuDNN
- def _set_cudnn_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuDNN
- def _get_cublas_allow_tf32() -> _bool: ... # THPModule_allowTF32CuBLAS
- def _set_cublas_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuBLAS
- def _get_float32_matmul_precision() -> str: ... # THPModule_float32MatmulPrecision
- def _set_float32_matmul_precision(arg: str) -> None: ... # THPModule_setFloat32MatmulPrecision
- def _get_cublas_allow_fp16_reduced_precision_reduction() -> _bool: ... # THPModule_allowFP16ReductionCuBLAS
- def _set_cublas_allow_fp16_reduced_precision_reduction(arg: _bool) -> None: ... # THPModule_setAllowFP16ReductionCuBLAS
- def _get_cublas_allow_bf16_reduced_precision_reduction() -> _bool: ... # THPModule_allowBF16ReductionCuBLAS
- def _set_cublas_allow_bf16_reduced_precision_reduction(arg: _bool) -> None: ... # THPModule_setAllowBF16ReductionCuBLAS
- def _set_conj(x: Tensor, conj: _bool) -> None: ...
- def _set_neg(x: Tensor, neg: _bool) -> None: ...
- def _set_meta_in_tls_dispatch_include(meta_in_tls: _bool) -> None: ...
- def _meta_in_tls_dispatch_include() -> _bool: ...
- def _select_conv_backend(*args, **kwargs) -> ConvBackend: ...
- def _conv_determine_backend_memory_format(input: Tensor, weight: Tensor, backend: ConvBackend) -> memory_format: ...
- def _has_storage(x: Tensor) -> _bool: ...
- def _should_allow_numbers_as_tensors(func_name: str) -> _bool: ...
- # NB: There is no Capsule type in typing, see
- # https://code.activestate.com/lists/python-dev/139675/
- def _to_dlpack(data: Tensor) -> Any: ... # THPModule_toDLPack
- def _from_dlpack(data: Any) -> Tensor: ... # THPModule_fromDLPack
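- # The DLPack capsules travel through torch.utils.dlpack; a minimal sketch:
- import torch
- from torch.utils.dlpack import from_dlpack, to_dlpack
- t = torch.arange(4)
- capsule = to_dlpack(t)  # wraps _to_dlpack; an opaque PyCapsule
- t2 = from_dlpack(capsule)  # zero-copy: t2 shares t's storage
- assert t2.data_ptr() == t.data_ptr()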
- def _get_cpp_backtrace(frames_to_skip: _int, maximum_number_of_frames: _int) -> str: ... # THPModule_getCppBacktrace
- def set_flush_denormal(arg: _bool) -> _bool: ... # THPModule_setFlushDenormal
- def get_default_dtype() -> _dtype: ... # THPModule_getDefaultDtype
- def _get_default_device() -> str: ... # THPModule_getDefaultDevice
- def _get_qengine() -> _int: ... # THPModule_qEngine
- def _set_qengine(qengine: _int) -> None: ... # THPModule_setQEngine
- def _supported_qengines() -> List[_int]: ... # THPModule_supportedQEngines
- def _is_xnnpack_enabled() -> _bool: ... # THPModule_isEnabledXNNPACK
- def _check_sparse_tensor_invariants() -> _bool: ... # THPModule_checkSparseTensorInvariants
- def _set_check_sparse_tensor_invariants(arg: _bool) -> None: ... # THPModule_setCheckSparseTensorInvariants
- def _set_default_mobile_cpu_allocator() -> None: ... # THPModule_setDefaultMobileCPUAllocator
- def _unset_default_mobile_cpu_allocator() -> None: ... # THPModule_unsetDefaultMobileCPUAllocator
- def _is_torch_function_enabled() -> _bool: ... # THPModule_isEnabledTorchFunction
- def _has_torch_function(args: Iterable[Any]) -> _bool: ... # THPModule_has_torch_function
- def _has_torch_function_unary(obj: Any) -> _bool: ... # THPModule_has_torch_function_unary
- def _has_torch_function_variadic(*args: Any) -> _bool: ... # THPModule_has_torch_function_variadic
- def _vmapmode_increment_nesting() -> _int: ... # THPModule_vmapmode_increment_nesting
- def _vmapmode_decrement_nesting() -> _int: ... # THPModule_vmapmode_decrement_nesting
- def _log_api_usage_once(event: str) -> None: ... # LogAPIUsageOnceFromPython
- def _demangle(name: str) -> str: ... # c10::demangle
- def _disabled_torch_function_impl(func: Callable, types: Iterable[Type], args: Tuple, kwargs: Dict) -> Any: ... # THPModule_disable_torch_function
- def _disabled_torch_dispatch_impl(func: Callable, types: Iterable[Type], args: Tuple, kwargs: Dict) -> Any: ... # THPModule_disable_dispatch_function
- def _get_linalg_preferred_backend() -> torch._C._LinalgBackend: ...
- def _set_linalg_preferred_backend(arg: torch._C._LinalgBackend): ...
- class _LinalgBackend:
- Default: _LinalgBackend
- Cusolver: _LinalgBackend
- Magma: _LinalgBackend
- class ConvBackend(Enum):
- ...
- # Defined in `valgrind.h` and `callgrind.h` respectively.
- def _valgrind_supported_platform() -> _bool: ... # NVALGRIND
- def _valgrind_toggle() -> None: ... # CALLGRIND_TOGGLE_COLLECT
- def _valgrind_toggle_and_dump_stats() -> None: ... # CALLGRIND_TOGGLE_COLLECT and CALLGRIND_DUMP_STATS
- has_openmp: _bool
- has_mkl: _bool
- has_mps: _bool
- has_lapack: _bool
- has_cuda: _bool
- has_mkldnn: _bool
- has_cudnn: _bool
- has_spectral: _bool
- _GLIBCXX_USE_CXX11_ABI: _bool
- default_generator: Generator
- # Defined in torch/csrc/autograd/init.cpp
- def _set_grad_enabled(enabled: _bool) -> None: ...
- def is_grad_enabled() -> _bool: ...
- def _set_fwd_grad_enabled(enabled: _bool) -> None: ...
- def _is_fwd_grad_enabled() -> _bool: ...
- def is_inference_mode_enabled() -> _bool: ...
- def set_autocast_enabled(enabled: _bool) -> None: ...
- def is_autocast_enabled() -> _bool: ...
- def clear_autocast_cache() -> None: ...
- def set_autocast_cpu_enabled(enabled: _bool) -> None: ...
- def is_autocast_cpu_enabled() -> _bool: ...
- def set_autocast_cpu_dtype(dtype: _dtype) -> None: ...
- def set_autocast_gpu_dtype(dtype: _dtype) -> None: ...
- def get_autocast_cpu_dtype() -> _dtype: ...
- def get_autocast_gpu_dtype() -> _dtype: ...
- def autocast_increment_nesting() -> _int: ...
- def autocast_decrement_nesting() -> _int: ...
- def is_autocast_cache_enabled() -> _bool: ...
- def set_autocast_cache_enabled(enabled: _bool) -> None: ...
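- # These accessors implement the public torch.autocast context manager, e.g.:
- import torch
- a, b = torch.randn(4, 4), torch.randn(4, 4)
- with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
-     y = torch.mm(a, b)  # runs in bfloat16 where autocast allows
- assert y.dtype == torch.bfloat16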
- def set_anomaly_enabled(enabled: _bool, check_nan: _bool = True) -> None: ...
- def is_anomaly_enabled() -> _bool: ...
- def is_anomaly_check_nan_enabled() -> _bool: ...
- def _enter_dual_level() -> _int: ...
- def _exit_dual_level(level: _int) -> None: ...
- def _make_dual(tensor: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
- def _unpack_dual(tensor: Tensor, level: _int) -> Tensor: ...
- def __set_forward_AD_enabled(enabled: _bool) -> None: ...
- def __is_forward_AD_enabled() -> _bool: ...
- def _register_default_hooks(pack_hook: Callable, unpack_hook: Callable) -> None: ...
- def _reset_default_hooks() -> None: ...
- def _is_torch_function_mode_enabled() -> _bool: ...
- def _set_torch_function_mode(cls: Any) -> None: ...
- def _push_on_torch_function_stack(cls: Any) -> None: ...
- def _pop_torch_function_stack() -> Any: ...
- def _get_function_stack_at(idx: _int) -> Any: ...
- def _len_torch_function_stack() -> _int: ...
- def _set_torch_dispatch_mode(cls: Any) -> None: ...
- def _push_on_torch_dispatch_stack(cls: Any) -> None: ...
- def _pop_torch_dispatch_stack() -> Any: ...
- def _get_dispatch_stack_at(idx: _int) -> Any: ...
- def _len_torch_dispatch_stack() -> _int: ...
- class _InferenceMode:
- def __init__(self, mode: _bool) -> None: ...
- class _DisableFuncTorch:
- def __init__(self) -> None: ...
- class _EnableTorchFunction:
- def __init__(self) -> None: ...
- class _MultithreadingEnabled:
- def __init__(self, mode: _bool) -> None: ...
- class _ViewReplayEnabled:
- def __init__(self, mode: _bool) -> None: ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class LoggerBase:
- ...
- class NoopLogger(LoggerBase):
- ...
- class LockingLogger(LoggerBase):
- ...
- class AggregationType(Enum):
- SUM = 0
- AVG = 1
- class FileCheck:
- def run(self, test_string: str) -> None: ...
- def check(self, test_string: str) -> 'FileCheck': ...
- def check_not(self, test_string: str) -> 'FileCheck': ...
- def check_same(self, test_string: str) -> 'FileCheck': ...
- def check_next(self, test_string: str) -> 'FileCheck': ...
- def check_count(self, test_string: str, count: _int, exactly: _bool = False) -> 'FileCheck': ...
- def check_dag(self, test_string: str) -> 'FileCheck': ...
- def check_source_highlighted(self, test_string: str) -> 'FileCheck': ...
- ...
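- # FileCheck asserts patterns over IR dumps, mostly in tests; a minimal sketch:
- from torch.testing import FileCheck
- ir = """graph(%x : Tensor):
-   %1 : int = prim::Constant[value=1]()
-   %y : Tensor = aten::add(%x, %1, %1)
-   return (%y)"""
- FileCheck().check("prim::Constant").check_next("aten::add").run(ir)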
- # Defined in torch/csrc/jit/python/init.cpp
- class PyTorchFileReader:
- @overload
- def __init__(self, name: str) -> None: ...
- @overload
- def __init__(self, buffer: BinaryIO) -> None: ...
- def get_record(self, name: str) -> bytes: ...
- ...
- class PyTorchFileWriter:
- @overload
- def __init__(self, name: str) -> None: ...
- @overload
- def __init__(self, buffer: BinaryIO) -> None: ...
- def write_record(self, name: str, data: Union[bytes, _int], size: _int) -> None: ...
- def write_end_of_file(self) -> None: ...
- def set_min_version(self, version: _int) -> None: ...
- def get_all_written_records(self) -> List[str]: ...
- def archive_name(self) -> str: ...
- ...
- def _jit_get_inline_everything_mode() -> _bool: ...
- def _jit_set_inline_everything_mode(enabled: _bool) -> None: ...
- def _jit_get_logging_option() -> str: ...
- def _jit_set_logging_option(option: str) -> None: ...
- def _jit_set_logging_stream(stream_name: str) -> None: ...
- def _jit_pass_cse(Graph) -> _bool: ...
- def _jit_pass_dce(Graph) -> None: ...
- def _jit_pass_lint(Graph) -> None: ...
- # Defined in torch/csrc/jit/python/python_custom_class.cpp
- def _get_custom_class_python_wrapper(name: str, attr: str) -> Any: ...
- # Defined in torch/csrc/Module.cpp
- def _rename_privateuse1_backend(backend: str) -> None: ...
- # Defined in torch/csrc/Generator.cpp
- class Generator:
- device: _device
- def __init__(self, device: Union[_device, str, None] = None) -> None: ...
- def get_state(self) -> Tensor: ...
- def set_state(self, _new_state: Tensor) -> Generator: ...
- def manual_seed(self, seed: _int) -> Generator: ...
- def seed(self) -> _int: ...
- def initial_seed(self) -> _int: ...
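- # Example of explicit RNG state management with Generator (illustrative):
- import torch
- g = torch.Generator(device="cpu")
- g.manual_seed(1234)  # returns the generator, so calls can chain
- state = g.get_state()  # snapshot of the RNG state as a ByteTensor
- a = torch.rand(3, generator=g)
- g.set_state(state)  # rewind to the snapshot
- assert torch.equal(torch.rand(3, generator=g), a)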
- # Defined in torch/csrc/utils/python_dispatch.cpp
- class _DispatchOperatorHandle:
- def schema(self) -> FunctionSchema: ...
- class _DispatchModule:
- def def_(self, schema: str, alias: str = "") -> _DispatchModule: ...
- def def_legacy(self, schema: str) -> _DispatchModule: ...
- def def_name_t_t(self, name: str, dispatch: str, debug: str = "default_def_name_t_t") -> _DispatchModule: ...
- def def_schema_t_t(self, schema: str, dispatch: str, alias: str, debug: str = "default_def_schema_t_t") -> _DispatchModule: ...
- def impl_t_t(self, name: str, dispatch: str, debug: str = "impl_t_t") -> _DispatchModule: ...
- def impl(self, name: str, dispatch: str, func: Callable) -> _DispatchModule: ...
- def define(self, schema: str, alias: str = "") -> _DispatchModule: ...
- def fallback_fallthrough(self, dispatch: str = "") -> _DispatchModule: ...
- def _dispatch_library(kind: str, name: str, dispatch: str, file: str = "", linenum: Any = 0) -> _DispatchModule: ...
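- # Usage sketch: _dispatch_library backs the public torch.library.Library
- # wrapper; "my_ns"/"my_op" below are hypothetical names.
- #   from torch.library import Library
- #   lib = Library("my_ns", "DEF")
- #   lib.define("my_op(Tensor x) -> Tensor")
- #   lib.impl("my_op", lambda x: x + 1, "CPU")
- #   # torch.ops.my_ns.my_op(torch.ones(2)) -> tensor([2., 2.])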
- def _dispatch_dump(name: str) -> str: ...
- def _dispatch_dump_table(name: str) -> str: ...
- def _dispatch_check_invariants(name: str) -> None: ...
- def _dispatch_check_all_invariants() -> None: ...
- def _dispatch_has_kernel(name: str) -> _bool: ...
- def _dispatch_has_kernel_for_dispatch_key(name: str, dispatch: _dispatchkey) -> _bool: ...
- def _dispatch_has_kernel_for_any_dispatch_key(name: str, dispatch_key_set: DispatchKeySet) -> _bool: ...
- def _dispatch_has_computed_kernel_for_dispatch_key(name: str, dispatch: _dispatchkey) -> _bool: ...
- def _dispatch_find_dangling_impls() -> List[str]: ...
- def _dispatch_get_all_op_names() -> List[str]: ...
- def _dispatch_tls_set_dispatch_key_excluded(dispatch: _dispatchkey, val: _bool) -> None: ...
- def _dispatch_tls_is_dispatch_key_excluded(dispatch: _dispatchkey) -> _bool: ...
- def _dispatch_tls_set_dispatch_key_included(dispatch: _dispatchkey, val: _bool) -> None: ...
- def _dispatch_tls_is_dispatch_key_included(dispatch: _dispatchkey) -> _bool: ...
- def _dispatch_isTensorSubclassLike(tensor: Tensor) -> _bool: ...
- def _dispatch_key_name(dispatch: _dispatchkey) -> str: ...
- def _dispatch_key_parse(dispatch: _dispatchkey) -> DispatchKey: ...
- def _dispatch_num_backends() -> _int: ...
- def _functionalization_reapply_views_tls() -> _bool: ...
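- # Usage sketch (private introspection hooks; the "aten::add.Tensor" operator
- # name format is an assumption that matches common test usage):
- #   import torch
- #   torch._C._dispatch_key_name(torch._C.DispatchKey.CPU)   # "CPU"
- #   print(torch._C._dispatch_dump("aten::add.Tensor"))      # per-key kernel table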
- class DispatchKey(Enum):
- Undefined: DispatchKey = ...
- FPGA: DispatchKey = ...
- ORT: DispatchKey = ...
- Vulkan: DispatchKey = ...
- Metal: DispatchKey = ...
- MKLDNN: DispatchKey = ...
- OpenGL: DispatchKey = ...
- OpenCL: DispatchKey = ...
- IDEEP: DispatchKey = ...
- CustomRNGKeyId: DispatchKey = ...
- MkldnnCPU: DispatchKey = ...
- Sparse: DispatchKey = ...
- SparseCsrCPU: DispatchKey = ...
- SparseCsrCUDA: DispatchKey = ...
- Python: DispatchKey = ...
- FuncTorchDynamicLayerBackMode: DispatchKey = ...
- ZeroTensor: DispatchKey = ...
- BackendSelect: DispatchKey = ...
- Named: DispatchKey = ...
- AutogradOther: DispatchKey = ...
- AutogradFunctionality: DispatchKey = ...
- AutogradNestedTensor: DispatchKey = ...
- Tracer: DispatchKey = ...
- Autocast: DispatchKey = ...
- Batched: DispatchKey = ...
- VmapMode: DispatchKey = ...
- FuncTorchDynamicLayerFrontMode: DispatchKey = ...
- Functionalize: DispatchKey = ...
- TESTING_ONLY_GenericWrapper: DispatchKey = ...
- TESTING_ONLY_GenericMode: DispatchKey = ...
- ADInplaceOrView: DispatchKey = ...
- Autograd: DispatchKey = ...
- CompositeImplicitAutograd: DispatchKey = ...
- CompositeImplicitAutogradNestedTensor: DispatchKey = ...
- CompositeExplicitAutograd: DispatchKey = ...
- CompositeExplicitAutogradNonFunctional: DispatchKey = ...
- CPU: DispatchKey = ...
- CUDA: DispatchKey = ...
- HIP: DispatchKey = ...
- XLA: DispatchKey = ...
- MPS: DispatchKey = ...
- IPU: DispatchKey = ...
- XPU: DispatchKey = ...
- HPU: DispatchKey = ...
- VE: DispatchKey = ...
- Lazy: DispatchKey = ...
- Meta: DispatchKey = ...
- PrivateUse1: DispatchKey = ...
- PrivateUse2: DispatchKey = ...
- PrivateUse3: DispatchKey = ...
- QuantizedCPU: DispatchKey = ...
- QuantizedCUDA: DispatchKey = ...
- QuantizedHIP: DispatchKey = ...
- QuantizedXLA: DispatchKey = ...
- QuantizedMPS: DispatchKey = ...
- QuantizedIPU: DispatchKey = ...
- QuantizedXPU: DispatchKey = ...
- QuantizedHPU: DispatchKey = ...
- QuantizedVE: DispatchKey = ...
- QuantizedLazy: DispatchKey = ...
- QuantizedMeta: DispatchKey = ...
- QuantizedPrivateUse1: DispatchKey = ...
- QuantizedPrivateUse2: DispatchKey = ...
- QuantizedPrivateUse3: DispatchKey = ...
- SparseCPU: DispatchKey = ...
- SparseCUDA: DispatchKey = ...
- SparseHIP: DispatchKey = ...
- SparseXLA: DispatchKey = ...
- SparseMPS: DispatchKey = ...
- SparseIPU: DispatchKey = ...
- SparseXPU: DispatchKey = ...
- SparseHPU: DispatchKey = ...
- SparseVE: DispatchKey = ...
- SparseLazy: DispatchKey = ...
- SparseMeta: DispatchKey = ...
- SparsePrivateUse1: DispatchKey = ...
- SparsePrivateUse2: DispatchKey = ...
- SparsePrivateUse3: DispatchKey = ...
- NestedTensorCPU: DispatchKey = ...
- NestedTensorCUDA: DispatchKey = ...
- NestedTensorHIP: DispatchKey = ...
- NestedTensorXLA: DispatchKey = ...
- NestedTensorMPS: DispatchKey = ...
- NestedTensorIPU: DispatchKey = ...
- NestedTensorXPU: DispatchKey = ...
- NestedTensorHPU: DispatchKey = ...
- NestedTensorVE: DispatchKey = ...
- NestedTensorLazy: DispatchKey = ...
- NestedTensorMeta: DispatchKey = ...
- NestedTensorPrivateUse1: DispatchKey = ...
- NestedTensorPrivateUse2: DispatchKey = ...
- NestedTensorPrivateUse3: DispatchKey = ...
- AutogradCPU: DispatchKey = ...
- AutogradCUDA: DispatchKey = ...
- AutogradHIP: DispatchKey = ...
- AutogradXLA: DispatchKey = ...
- AutogradMPS: DispatchKey = ...
- AutogradIPU: DispatchKey = ...
- AutogradXPU: DispatchKey = ...
- AutogradHPU: DispatchKey = ...
- AutogradVE: DispatchKey = ...
- AutogradLazy: DispatchKey = ...
- AutogradMeta: DispatchKey = ...
- AutogradPrivateUse1: DispatchKey = ...
- AutogradPrivateUse2: DispatchKey = ...
- AutogradPrivateUse3: DispatchKey = ...
- class DispatchKeySet:
- def __or__(self, other: DispatchKeySet) -> DispatchKeySet: ...
- def __sub__(self, other: DispatchKeySet) -> DispatchKeySet: ...
- def __and__(self, other: DispatchKeySet) -> DispatchKeySet: ...
- def highestPriorityTypeId(self) -> DispatchKey: ...
- def has(self, k: _dispatchkey) -> _bool: ...
- def __repr__(self) -> str: ...
- _dispatch_autogradother_backends: DispatchKeySet
- def _dispatch_has_backend_fallback(dispatch: _dispatchkey) -> _bool: ...
- def _dispatch_keyset_full_after(t: _dispatchkey) -> DispatchKeySet: ...
- def _dispatch_keyset_to_string(keyset: DispatchKeySet) -> str: ...
- def _dispatch_get_backend_keyset_from_autograd(dispatch: _dispatchkey) -> DispatchKeySet: ...
- def _dispatch_keys(tensor: Tensor) -> DispatchKeySet: ...
- def _dispatch_tls_local_exclude_set() -> DispatchKeySet: ...
- def _dispatch_tls_local_include_set() -> DispatchKeySet: ...
- def _dispatch_is_included_in_alias(dispatch_a: _dispatchkey, dispatch_b: _dispatchkey) -> _bool: ...
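- # Usage sketch: inspect which dispatch keys a tensor carries.
- #   import torch
- #   ks = torch._C._dispatch_keys(torch.randn(2))
- #   assert ks.has(torch._C.DispatchKey.CPU)
- #   assert not ks.has(torch._C.DispatchKey.CUDA)
- #   print(ks | torch._C._dispatch_tls_local_include_set())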
- class ExcludeDispatchKeyGuard:
- pass
- class _AutoDispatchBelowAutograd:
- pass
- def _dispatch_print_registrations_for_dispatch_key(dispatch_key: str = "") -> None: ...
- def _dispatch_get_registrations_for_dispatch_key(dispatch_key: str = "") -> List[str]: ...
- def _are_functorch_transforms_active() -> _bool: ...
- # Defined in torch/csrc/autograd/init.cpp
- class _DisablePythonDispatcher:
- pass
- class _EnablePythonDispatcher:
- pass
- def _set_python_dispatcher(dispatcher: object) -> None: ...
- # Defined in torch/csrc/utils/init.cpp
- class BenchmarkConfig:
- num_calling_threads: _int
- num_worker_threads: _int
- num_warmup_iters: _int
- num_iters: _int
- profiler_output_path: str
- class BenchmarkExecutionStats:
- latency_avg_ms: _float
- num_iters: _int
- class ThroughputBenchmark:
- def __init__(self, module: Any) -> None: ...
- def add_input(self, *args: Any, **kwargs: Any) -> None: ...
- def run_once(self, *args: Any, **kwargs: Any) -> Any: ...
- def benchmark(self, config: BenchmarkConfig) -> BenchmarkExecutionStats: ...
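- # Usage sketch: these bindings back torch.utils.throughput_benchmark, whose
- # wrapper builds the BenchmarkConfig; the keyword names below mirror the
- # config fields and are assumed to match that wrapper.
- #   import torch
- #   from torch.utils.throughput_benchmark import ThroughputBenchmark
- #   module = torch.jit.script(torch.nn.Linear(16, 4))
- #   bench = ThroughputBenchmark(module)
- #   bench.add_input(torch.randn(1, 16))
- #   stats = bench.benchmark(num_calling_threads=4, num_warmup_iters=10, num_iters=100)
- #   print(stats.latency_avg_ms)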
- # Defined in torch/csrc/Storage.cpp
- class StorageBase(object): ...
- # TODO: where are these defined?
- class DoubleTensor(Tensor): ...
- class FloatTensor(Tensor): ...
- class LongTensor(Tensor): ...
- class IntTensor(Tensor): ...
- class ShortTensor(Tensor): ...
- class HalfTensor(Tensor): ...
- class CharTensor(Tensor): ...
- class ByteTensor(Tensor): ...
- class BoolTensor(Tensor): ...
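- # Usage sketch: these are the legacy dtype-specific tensor classes; new code
- # should prefer factory functions with an explicit dtype.
- #   import torch
- #   a = torch.FloatTensor(2, 3)                  # uninitialized, legacy style
- #   b = torch.empty(2, 3, dtype=torch.float32)   # preferred equivalent
- #   assert a.dtype == b.dtype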
- # Defined in torch/csrc/autograd/python_engine.cpp
- class _ImperativeEngine:
- def queue_callback(self, callback: Callable[[], None]) -> None: ...
- def run_backward(self, *args: Any, **kwargs: Any) -> Tuple[Tensor, ...]: ...
- def is_checkpoint_valid(self) -> _bool: ...
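- # Usage sketch (internal API, shown only for orientation): the singleton
- # engine lives at torch.autograd.Variable._execution_engine; queue_callback
- # schedules a function to run when the current backward pass finishes and is
- # only valid while a backward is in flight (e.g. from inside a hook).
- #   import torch
- #   engine = torch.autograd.Variable._execution_engine
- #   def _hook(grad: torch.Tensor) -> None:
- #       engine.queue_callback(lambda: print("backward finished"))
- #   x = torch.ones(2, requires_grad=True)
- #   x.register_hook(_hook)
- #   x.sum().backward()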
- # Defined in torch/csrc/autograd/python_variable.cpp
- class _TensorMeta(type):
- pass
- # Defined in torch/csrc/autograd/python_variable.cpp
- class _TensorBase(metaclass=_TensorMeta):
- requires_grad: _bool
- shape: Size
- data: Tensor
- names: List[str]
- device: _device
- dtype: _dtype
- layout: _layout
- real: Tensor
- imag: Tensor
- T: Tensor
- H: Tensor
- mT: Tensor
- mH: Tensor
- ndim: _int
- output_nr: _int
- _version: _int
- _base: Optional[Tensor]
- _cdata: _int
- grad_fn: _Node
- _grad_fn: Any
- _grad: Optional[Tensor]
- grad: Optional[Tensor]
- _backward_hooks: Optional[Dict[_int, Callable[[Tensor], Optional[Tensor]]]]
- def __abs__(self) -> Tensor: ...
- def __add__(self, other: Any) -> Tensor: ...
- @overload
- def __and__(self, other: Tensor) -> Tensor: ...
- @overload
- def __and__(self, other: Number) -> Tensor: ...
- @overload
- def __and__(self, other: Any) -> Tensor: ...
- def __bool__(self) -> builtins.bool: ...
- def __complex__(self) -> builtins.complex: ...
- def __div__(self, other: Any) -> Tensor: ...
- def __eq__(self, other: Any) -> Tensor: ... # type: ignore[override]
- def __float__(self) -> builtins.float: ...
- def __floordiv__(self, other: Any) -> Tensor: ...
- def __ge__(self, other: Any) -> Tensor: ...
- def __getitem__(self, indices: Union[None, _int, slice, Tensor, List, Tuple]) -> Tensor: ...
- def __gt__(self, other: Any) -> Tensor: ...
- def __iadd__(self, other: Any) -> Tensor: ...
- @overload
- def __iand__(self, other: Tensor) -> Tensor: ...
- @overload
- def __iand__(self, other: Number) -> Tensor: ...
- @overload
- def __iand__(self, other: Any) -> Tensor: ...
- def __idiv__(self, other: Any) -> Tensor: ...
- def __ifloordiv__(self, other: Any) -> Tensor: ...
- @overload
- def __ilshift__(self, other: Tensor) -> Tensor: ...
- @overload
- def __ilshift__(self, other: Number) -> Tensor: ...
- @overload
- def __ilshift__(self, other: Any) -> Tensor: ...
- def __imod__(self, other: Any) -> Tensor: ...
- def __imul__(self, other: Any) -> Tensor: ...
- def __index__(self) -> builtins.int: ...
- @overload
- def __init__(self, *args: Any, device: Device=None) -> None: ...
- @overload
- def __init__(self, storage: Storage) -> None: ...
- @overload
- def __init__(self, other: Tensor) -> None: ...
- @overload
- def __init__(self, size: _size, *, device: Device=None) -> None: ...
- def __int__(self) -> builtins.int: ...
- def __invert__(self) -> Tensor: ...
- @overload
- def __ior__(self, other: Tensor) -> Tensor: ...
- @overload
- def __ior__(self, other: Number) -> Tensor: ...
- @overload
- def __ior__(self, other: Any) -> Tensor: ...
- @overload
- def __irshift__(self, other: Tensor) -> Tensor: ...
- @overload
- def __irshift__(self, other: Number) -> Tensor: ...
- @overload
- def __irshift__(self, other: Any) -> Tensor: ...
- def __isub__(self, other: Any) -> Tensor: ...
- @overload
- def __ixor__(self, other: Tensor) -> Tensor: ...
- @overload
- def __ixor__(self, other: Number) -> Tensor: ...
- @overload
- def __ixor__(self, other: Any) -> Tensor: ...
- def __le__(self, other: Any) -> Tensor: ...
- def __long__(self) -> builtins.int: ...
- @overload
- def __lshift__(self, other: Tensor) -> Tensor: ...
- @overload
- def __lshift__(self, other: Number) -> Tensor: ...
- @overload
- def __lshift__(self, other: Any) -> Tensor: ...
- def __lt__(self, other: Any) -> Tensor: ...
- def __matmul__(self, other: Any) -> Tensor: ...
- def __mod__(self, other: Any) -> Tensor: ...
- def __mul__(self, other: Any) -> Tensor: ...
- def __ne__(self, other: Any) -> Tensor: ... # type: ignore[override]
- def __neg__(self) -> Tensor: ...
- def __nonzero__(self) -> builtins.bool: ...
- @overload
- def __or__(self, other: Tensor) -> Tensor: ...
- @overload
- def __or__(self, other: Number) -> Tensor: ...
- @overload
- def __or__(self, other: Any) -> Tensor: ...
- def __pow__(self, other: Any) -> Tensor: ...
- def __radd__(self, other: Any) -> Tensor: ...
- def __rand__(self, other: Any) -> Tensor: ...
- def __rfloordiv__(self, other: Any) -> Tensor: ...
- def __rmul__(self, other: Any) -> Tensor: ...
- def __ror__(self, other: Any) -> Tensor: ...
- def __rpow__(self, other: Any) -> Tensor: ...
- @overload
- def __rshift__(self, other: Tensor) -> Tensor: ...
- @overload
- def __rshift__(self, other: Number) -> Tensor: ...
- @overload
- def __rshift__(self, other: Any) -> Tensor: ...
- def __rsub__(self, other: Any) -> Tensor: ...
- def __rtruediv__(self, other: Any) -> Tensor: ...
- def __rxor__(self, other: Any) -> Tensor: ...
- def __setitem__(self, indices: Union[None, _int, slice, Tensor, List, Tuple], val: Union[Tensor, Number]) -> None: ...
- def __sub__(self, other: Any) -> Tensor: ...
- def __truediv__(self, other: Any) -> Tensor: ...
- @overload
- def __xor__(self, other: Tensor) -> Tensor: ...
- @overload
- def __xor__(self, other: Number) -> Tensor: ...
- @overload
- def __xor__(self, other: Any) -> Tensor: ...
- def _addmm_activation(self, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, use_gelu: _bool=False) -> Tensor: ...
- def _autocast_to_full_precision(self, cuda_enabled: _bool, cpu_enabled: _bool) -> Tensor: ...
- def _autocast_to_reduced_precision(self, cuda_enabled: _bool, cpu_enabled: _bool, cuda_dtype: _dtype, cpu_dtype: _dtype) -> Tensor: ...
- def _coalesced_(self, coalesced: _bool) -> Tensor: ...
- def _conj(self) -> Tensor: ...
- def _conj_physical(self) -> Tensor: ...
- def _dimI(self) -> _int: ...
- def _dimV(self) -> _int: ...
- def _indices(self) -> Tensor: ...
- def _is_all_true(self) -> Tensor: ...
- def _is_any_true(self) -> Tensor: ...
- def _is_view(self) -> _bool: ...
- def _is_zerotensor(self) -> _bool: ...
- def _make_subclass(cls, data: Tensor, require_grad: _bool = False, dispatch_strides: _bool=False, dispatch_device: _bool=False, device_for_backend_keys: Optional[_device] = None) -> Tensor: ...
- def _neg_view(self) -> Tensor: ...
- def _nested_tensor_size(self) -> Tensor: ...
- def _nested_tensor_strides(self) -> Tensor: ...
- def _nnz(self) -> _int: ...
- def _to_dense(self, dtype: Optional[_dtype]=None) -> Tensor: ...
- def _values(self) -> Tensor: ...
- def abs(self) -> Tensor: ...
- def abs_(self) -> Tensor: ...
- def absolute(self) -> Tensor: ...
- def absolute_(self) -> Tensor: ...
- def acos(self) -> Tensor: ...
- def acos_(self) -> Tensor: ...
- def acosh(self) -> Tensor: ...
- def acosh_(self) -> Tensor: ...
- def add(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
- def add_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, alpha: Optional[Number]=1) -> Tensor: ...
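- # Usage sketch (comment only): alpha scales `other` before the addition, so
- # t.add(u, alpha=2) computes t + 2*u; add_ is the in-place variant.
- #   x = torch.ones(3)
- #   x.add(torch.ones(3), alpha=2)   # tensor([3., 3., 3.])
- #   x.add_(1)                       # x becomes tensor([2., 2., 2.])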
- def addbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def addbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def addcdiv(self, tensor1: Tensor, tensor2: Tensor, *, value: Number=1) -> Tensor: ...
- def addcdiv_(self, tensor1: Tensor, tensor2: Tensor, *, value: Number=1) -> Tensor: ...
- def addcmul(self, tensor1: Tensor, tensor2: Tensor, *, value: Number=1) -> Tensor: ...
- def addcmul_(self, tensor1: Tensor, tensor2: Tensor, *, value: Number=1) -> Tensor: ...
- def addmm(self, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def addmm_(self, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def addmv(self, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def addmv_(self, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def addr(self, vec1: Tensor, vec2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def addr_(self, vec1: Tensor, vec2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def adjoint(self) -> Tensor: ...
- def align_as(self, other: Tensor) -> Tensor: ...
- @overload
- def align_to(self, order: Sequence[Union[str, ellipsis, None]], ellipsis_idx: _int) -> Tensor: ...
- @overload
- def align_to(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
- @overload
- def all(self) -> Tensor: ...
- @overload
- def all(self, dim: _int, keepdim: _bool=False) -> Tensor: ...
- @overload
- def all(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> Tensor: ...
- def allclose(self, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> _bool: ...
- def amax(self, dim: Union[_int, _size]=(), keepdim: _bool=False) -> Tensor: ...
- def amin(self, dim: Union[_int, _size]=(), keepdim: _bool=False) -> Tensor: ...
- def aminmax(self, *, dim: Optional[_int]=None, keepdim: _bool=False) -> torch.return_types.aminmax: ...
- def angle(self) -> Tensor: ...
- @overload
- def any(self) -> Tensor: ...
- @overload
- def any(self, dim: _int, keepdim: _bool=False) -> Tensor: ...
- @overload
- def any(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> Tensor: ...
- def apply_(self, callable: Callable) -> Tensor: ...
- def arccos(self) -> Tensor: ...
- def arccos_(self) -> Tensor: ...
- def arccosh(self) -> Tensor: ...
- def arccosh_(self) -> Tensor: ...
- def arcsin(self) -> Tensor: ...
- def arcsin_(self) -> Tensor: ...
- def arcsinh(self) -> Tensor: ...
- def arcsinh_(self) -> Tensor: ...
- def arctan(self) -> Tensor: ...
- def arctan2(self, other: Tensor) -> Tensor: ...
- def arctan2_(self, other: Tensor) -> Tensor: ...
- def arctan_(self) -> Tensor: ...
- def arctanh(self) -> Tensor: ...
- def arctanh_(self) -> Tensor: ...
- def argmax(self, dim: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ...
- def argmin(self, dim: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ...
- @overload
- def argsort(self, *, stable: _bool, dim: _int=-1, descending: _bool=False) -> Tensor: ...
- @overload
- def argsort(self, dim: _int=-1, descending: _bool=False) -> Tensor: ...
- @overload
- def argsort(self, dim: Union[str, ellipsis, None], descending: _bool=False) -> Tensor: ...
- def argwhere(self) -> Tensor: ...
- def as_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
- def as_strided_(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
- def as_strided_scatter(self, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]]=None) -> Tensor: ...
- def as_subclass(self, cls: Type[S]) -> S: ...
- def asin(self) -> Tensor: ...
- def asin_(self) -> Tensor: ...
- def asinh(self) -> Tensor: ...
- def asinh_(self) -> Tensor: ...
- def atan(self) -> Tensor: ...
- def atan2(self, other: Tensor) -> Tensor: ...
- def atan2_(self, other: Tensor) -> Tensor: ...
- def atan_(self) -> Tensor: ...
- def atanh(self) -> Tensor: ...
- def atanh_(self) -> Tensor: ...
- def baddbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- def baddbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- @overload
- def bernoulli(self, *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def bernoulli(self, p: _float, *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def bernoulli_(self, p: Tensor, *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def bernoulli_(self, p: _float=0.5, *, generator: Optional[Generator]=None) -> Tensor: ...
- def bfloat16(self) -> Tensor: ...
- def bincount(self, weights: Optional[Tensor]=None, minlength: _int=0) -> Tensor: ...
- @overload
- def bitwise_and(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_and(self, other: Number) -> Tensor: ...
- @overload
- def bitwise_and_(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_and_(self, other: Number) -> Tensor: ...
- @overload
- def bitwise_left_shift(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_left_shift(self, other: Number) -> Tensor: ...
- @overload
- def bitwise_left_shift_(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_left_shift_(self, other: Number) -> Tensor: ...
- def bitwise_not(self) -> Tensor: ...
- def bitwise_not_(self) -> Tensor: ...
- @overload
- def bitwise_or(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_or(self, other: Number) -> Tensor: ...
- @overload
- def bitwise_or_(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_or_(self, other: Number) -> Tensor: ...
- @overload
- def bitwise_right_shift(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_right_shift(self, other: Number) -> Tensor: ...
- @overload
- def bitwise_right_shift_(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_right_shift_(self, other: Number) -> Tensor: ...
- @overload
- def bitwise_xor(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_xor(self, other: Number) -> Tensor: ...
- @overload
- def bitwise_xor_(self, other: Tensor) -> Tensor: ...
- @overload
- def bitwise_xor_(self, other: Number) -> Tensor: ...
- def bmm(self, mat2: Tensor) -> Tensor: ...
- def bool(self) -> Tensor: ...
- @overload
- def broadcast_to(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- @overload
- def broadcast_to(self, *size: _int) -> Tensor: ...
- def byte(self) -> Tensor: ...
- def cauchy_(self, median: _float=0, sigma: _float=1, *, generator: Optional[Generator]=None) -> Tensor: ...
- def ccol_indices(self) -> Tensor: ...
- def ceil(self) -> Tensor: ...
- def ceil_(self) -> Tensor: ...
- def chalf(self, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
- def char(self) -> Tensor: ...
- def cholesky(self, upper: _bool=False) -> Tensor: ...
- def cholesky_inverse(self, upper: _bool=False) -> Tensor: ...
- def cholesky_solve(self, input2: Tensor, upper: _bool=False) -> Tensor: ...
- def chunk(self, chunks: _int, dim: _int=0) -> List[Tensor]: ...
- @overload
- def clamp(self, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp(self, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
- @overload
- def clamp_(self, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clamp_(self, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
- @overload
- def clamp_max(self, max: Tensor) -> Tensor: ...
- @overload
- def clamp_max(self, max: Number) -> Tensor: ...
- @overload
- def clamp_max_(self, max: Tensor) -> Tensor: ...
- @overload
- def clamp_max_(self, max: Number) -> Tensor: ...
- @overload
- def clamp_min(self, min: Tensor) -> Tensor: ...
- @overload
- def clamp_min(self, min: Number) -> Tensor: ...
- @overload
- def clamp_min_(self, min: Tensor) -> Tensor: ...
- @overload
- def clamp_min_(self, min: Number) -> Tensor: ...
- @overload
- def clip(self, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clip(self, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
- @overload
- def clip_(self, min: Optional[Tensor]=None, max: Optional[Tensor]=None) -> Tensor: ...
- @overload
- def clip_(self, min: Optional[Number]=None, max: Optional[Number]=None) -> Tensor: ...
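- # Usage sketch (comment only): clamp bounds values element-wise; min/max may
- # be Numbers or broadcastable Tensors, and clip/clip_ are aliases.
- #   t = torch.tensor([-2.0, 0.5, 3.0])
- #   t.clamp(min=-1.0, max=1.0)      # tensor([-1.0000, 0.5000, 1.0000])
- #   t.clamp_(min=0.0)               # in place: tensor([0.0000, 0.5000, 3.0000])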
- def clone(self, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
- def coalesce(self) -> Tensor: ...
- def col_indices(self) -> Tensor: ...
- def conj(self) -> Tensor: ...
- def conj_physical(self) -> Tensor: ...
- def conj_physical_(self) -> Tensor: ...
- def contiguous(self, memory_format=torch.contiguous_format) -> Tensor: ...
- def copy_(self, src: Tensor, non_blocking: _bool=False) -> Tensor: ...
- @overload
- def copysign(self, other: Tensor) -> Tensor: ...
- @overload
- def copysign(self, other: Number) -> Tensor: ...
- @overload
- def copysign_(self, other: Tensor) -> Tensor: ...
- @overload
- def copysign_(self, other: Number) -> Tensor: ...
- def corrcoef(self) -> Tensor: ...
- def cos(self) -> Tensor: ...
- def cos_(self) -> Tensor: ...
- def cosh(self) -> Tensor: ...
- def cosh_(self) -> Tensor: ...
- @overload
- def count_nonzero(self, dim: Optional[_int]=None) -> Tensor: ...
- @overload
- def count_nonzero(self, dim: _size) -> Tensor: ...
- @overload
- def count_nonzero(self, *dim: _int) -> Tensor: ...
- def cov(self, *, correction: _int=1, fweights: Optional[Tensor]=None, aweights: Optional[Tensor]=None) -> Tensor: ...
- def cpu(self) -> Tensor: ...
- def cross(self, other: Tensor, dim: Optional[_int]=None) -> Tensor: ...
- def crow_indices(self) -> Tensor: ...
- def cuda(self, device: Optional[Union[_device, _int, str]]=None, non_blocking: _bool=False) -> Tensor: ...
- @overload
- def cummax(self, dim: _int) -> torch.return_types.cummax: ...
- @overload
- def cummax(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummax: ...
- @overload
- def cummin(self, dim: _int) -> torch.return_types.cummin: ...
- @overload
- def cummin(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummin: ...
- @overload
- def cumprod(self, dim: _int, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def cumprod(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def cumprod_(self, dim: _int, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def cumprod_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def cumsum(self, dim: _int, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def cumsum(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def cumsum_(self, dim: _int, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def cumsum_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
- def data_ptr(self) -> _int: ...
- def deg2rad(self) -> Tensor: ...
- def deg2rad_(self) -> Tensor: ...
- def dense_dim(self) -> _int: ...
- def dequantize(self) -> Tensor: ...
- def det(self) -> Tensor: ...
- def detach(self) -> Tensor: ...
- def detach_(self) -> Tensor: ...
- def diag(self, diagonal: _int=0) -> Tensor: ...
- def diag_embed(self, offset: _int=0, dim1: _int=-2, dim2: _int=-1) -> Tensor: ...
- def diagflat(self, offset: _int=0) -> Tensor: ...
- @overload
- def diagonal(self, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int=0) -> Tensor: ...
- @overload
- def diagonal(self, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ...
- def diagonal_scatter(self, src: Tensor, offset: _int=0, dim1: _int=0, dim2: _int=1) -> Tensor: ...
- def diff(self, n: _int=1, dim: _int=-1, prepend: Optional[Tensor]=None, append: Optional[Tensor]=None) -> Tensor: ...
- def digamma(self) -> Tensor: ...
- def digamma_(self) -> Tensor: ...
- def dim(self) -> _int: ...
- def dist(self, other: Tensor, p: Number=2) -> Tensor: ...
- def div(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor: ...
- def div_(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor: ...
- @overload
- def divide(self, other: Tensor) -> Tensor: ...
- @overload
- def divide(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor: ...
- @overload
- def divide(self, other: Number, *, rounding_mode: Optional[str]) -> Tensor: ...
- @overload
- def divide(self, other: Number) -> Tensor: ...
- @overload
- def divide_(self, other: Tensor) -> Tensor: ...
- @overload
- def divide_(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor: ...
- @overload
- def divide_(self, other: Number, *, rounding_mode: Optional[str]) -> Tensor: ...
- @overload
- def divide_(self, other: Number) -> Tensor: ...
- def dot(self, tensor: Tensor) -> Tensor: ...
- def double(self) -> Tensor: ...
- @overload
- def dsplit(self, sections: _int) -> List[Tensor]: ...
- @overload
- def dsplit(self, indices: _size) -> List[Tensor]: ...
- @overload
- def dsplit(self, *indices: _int) -> List[Tensor]: ...
- def element_size(self) -> _int: ...
- @overload
- def eq(self, other: Tensor) -> Tensor: ...
- @overload
- def eq(self, other: Number) -> Tensor: ...
- @overload
- def eq_(self, other: Tensor) -> Tensor: ...
- @overload
- def eq_(self, other: Number) -> Tensor: ...
- def equal(self, other: Tensor) -> _bool: ...
- def erf(self) -> Tensor: ...
- def erf_(self) -> Tensor: ...
- def erfc(self) -> Tensor: ...
- def erfc_(self) -> Tensor: ...
- def erfinv(self) -> Tensor: ...
- def erfinv_(self) -> Tensor: ...
- def exp(self) -> Tensor: ...
- def exp2(self) -> Tensor: ...
- def exp2_(self) -> Tensor: ...
- def exp_(self) -> Tensor: ...
- @overload
- def expand(self, size: Sequence[Union[_int, SymInt]], *, implicit: _bool=False) -> Tensor: ...
- @overload
- def expand(self, *size: _int, implicit: _bool=False) -> Tensor: ...
- def expand_as(self, other: Tensor) -> Tensor: ...
- def expm1(self) -> Tensor: ...
- def expm1_(self) -> Tensor: ...
- def exponential_(self, lambd: _float=1, *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def fill_(self, value: Tensor) -> Tensor: ...
- @overload
- def fill_(self, value: Number) -> Tensor: ...
- def fill_diagonal_(self, fill_value: Number, wrap: _bool=False) -> Tensor: ...
- def fix(self) -> Tensor: ...
- def fix_(self) -> Tensor: ...
- @overload
- def flatten(self, start_dim: _int=0, end_dim: _int=-1) -> Tensor: ...
- @overload
- def flatten(self, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: ...
- @overload
- def flatten(self, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
- @overload
- def flatten(self, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
- @overload
- def flip(self, dims: _size) -> Tensor: ...
- @overload
- def flip(self, *dims: _int) -> Tensor: ...
- def fliplr(self) -> Tensor: ...
- def flipud(self) -> Tensor: ...
- def float(self) -> Tensor: ...
- @overload
- def float_power(self, exponent: Tensor) -> Tensor: ...
- @overload
- def float_power(self, exponent: Number) -> Tensor: ...
- @overload
- def float_power_(self, exponent: Tensor) -> Tensor: ...
- @overload
- def float_power_(self, exponent: Number) -> Tensor: ...
- def floor(self) -> Tensor: ...
- def floor_(self) -> Tensor: ...
- def floor_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor]=None) -> Tensor: ...
- def floor_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor: ...
- def fmax(self, other: Tensor) -> Tensor: ...
- def fmin(self, other: Tensor) -> Tensor: ...
- @overload
- def fmod(self, other: Tensor) -> Tensor: ...
- @overload
- def fmod(self, other: Number) -> Tensor: ...
- @overload
- def fmod_(self, other: Tensor) -> Tensor: ...
- @overload
- def fmod_(self, other: Number) -> Tensor: ...
- def frac(self) -> Tensor: ...
- def frac_(self) -> Tensor: ...
- def frexp(self) -> torch.return_types.frexp: ...
- @overload
- def gather(self, dim: _int, index: Tensor, *, sparse_grad: _bool=False) -> Tensor: ...
- @overload
- def gather(self, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool=False) -> Tensor: ...
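- # Usage sketch (comment only): gather picks values along dim with an index
- # tensor of the same rank; for dim=1, out[i][j] = self[i][index[i][j]].
- #   src = torch.tensor([[1, 2], [3, 4]])
- #   idx = torch.tensor([[0, 0], [1, 0]])
- #   src.gather(1, idx)              # tensor([[1, 1], [4, 3]])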
- def gcd(self, other: Tensor) -> Tensor: ...
- def gcd_(self, other: Tensor) -> Tensor: ...
- @overload
- def ge(self, other: Tensor) -> Tensor: ...
- @overload
- def ge(self, other: Number) -> Tensor: ...
- @overload
- def ge_(self, other: Tensor) -> Tensor: ...
- @overload
- def ge_(self, other: Number) -> Tensor: ...
- def geometric_(self, p: _float, *, generator: Optional[Generator]=None) -> Tensor: ...
- def geqrf(self) -> torch.return_types.geqrf: ...
- def ger(self, vec2: Tensor) -> Tensor: ...
- def get_device(self) -> _int: ...
- @overload
- def greater(self, other: Tensor) -> Tensor: ...
- @overload
- def greater(self, other: Number) -> Tensor: ...
- @overload
- def greater_(self, other: Tensor) -> Tensor: ...
- @overload
- def greater_(self, other: Number) -> Tensor: ...
- @overload
- def greater_equal(self, other: Tensor) -> Tensor: ...
- @overload
- def greater_equal(self, other: Number) -> Tensor: ...
- @overload
- def greater_equal_(self, other: Tensor) -> Tensor: ...
- @overload
- def greater_equal_(self, other: Number) -> Tensor: ...
- @overload
- def gt(self, other: Tensor) -> Tensor: ...
- @overload
- def gt(self, other: Number) -> Tensor: ...
- @overload
- def gt_(self, other: Tensor) -> Tensor: ...
- @overload
- def gt_(self, other: Number) -> Tensor: ...
- def half(self) -> Tensor: ...
- def hardshrink(self, lambd: Number=0.5) -> Tensor: ...
- def has_names(self) -> _bool: ...
- def heaviside(self, values: Tensor) -> Tensor: ...
- def heaviside_(self, values: Tensor) -> Tensor: ...
- def histc(self, bins: _int=100, min: Number=0, max: Number=0) -> Tensor: ...
- @overload
- def histogram(self, bins: Tensor, *, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogram: ...
- @overload
- def histogram(self, bins: _int=100, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> torch.return_types.histogram: ...
- @overload
- def hsplit(self, sections: _int) -> List[Tensor]: ...
- @overload
- def hsplit(self, indices: _size) -> List[Tensor]: ...
- @overload
- def hsplit(self, *indices: _int) -> List[Tensor]: ...
- def hypot(self, other: Tensor) -> Tensor: ...
- def hypot_(self, other: Tensor) -> Tensor: ...
- def i0(self) -> Tensor: ...
- def i0_(self) -> Tensor: ...
- def igamma(self, other: Tensor) -> Tensor: ...
- def igamma_(self, other: Tensor) -> Tensor: ...
- def igammac(self, other: Tensor) -> Tensor: ...
- def igammac_(self, other: Tensor) -> Tensor: ...
- @overload
- def index_add(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Number=1) -> Tensor: ...
- @overload
- def index_add(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Number=1) -> Tensor: ...
- def index_add_(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Number=1) -> Tensor: ...
- @overload
- def index_copy(self, dim: _int, index: Tensor, source: Tensor) -> Tensor: ...
- @overload
- def index_copy(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ...
- @overload
- def index_copy_(self, dim: _int, index: Tensor, source: Tensor) -> Tensor: ...
- @overload
- def index_copy_(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ...
- @overload
- def index_fill(self, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def index_fill(self, dim: _int, index: Tensor, value: Number) -> Tensor: ...
- @overload
- def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
- @overload
- def index_fill_(self, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def index_fill_(self, dim: _int, index: Tensor, value: Number) -> Tensor: ...
- @overload
- def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
- def index_put(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ...
- def index_put_(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False) -> Tensor: ...
- def index_reduce(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool=True) -> Tensor: ...
- def index_reduce_(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool=True) -> Tensor: ...
- @overload
- def index_select(self, dim: _int, index: Tensor) -> Tensor: ...
- @overload
- def index_select(self, dim: Union[str, ellipsis, None], index: Tensor) -> Tensor: ...
- def indices(self) -> Tensor: ...
- def inner(self, other: Tensor) -> Tensor: ...
- def int(self) -> Tensor: ...
- def int_repr(self) -> Tensor: ...
- def inverse(self) -> Tensor: ...
- def is_coalesced(self) -> _bool: ...
- def is_complex(self) -> _bool: ...
- def is_conj(self) -> _bool: ...
- def is_contiguous(self, memory_format=torch.contiguous_format) -> _bool: ...
- is_cuda: _bool
- def is_distributed(self) -> _bool: ...
- def is_floating_point(self) -> _bool: ...
- def is_inference(self) -> _bool: ...
- is_ipu: _bool
- is_leaf: _bool
- is_meta: _bool
- is_mkldnn: _bool
- is_mps: _bool
- def is_neg(self) -> _bool: ...
- is_nested: _bool
- def is_nonzero(self) -> _bool: ...
- is_ort: _bool
- def is_pinned(self, device: Optional[Union[_device, str, None]]=None) -> _bool: ...
- is_quantized: _bool
- def is_same_size(self, other: Tensor) -> _bool: ...
- def is_set_to(self, tensor: Tensor) -> _bool: ...
- def is_signed(self) -> _bool: ...
- is_sparse: _bool
- is_sparse_csr: _bool
- is_vulkan: _bool
- def isclose(self, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> Tensor: ...
- def isfinite(self) -> Tensor: ...
- def isinf(self) -> Tensor: ...
- def isnan(self) -> Tensor: ...
- def isneginf(self) -> Tensor: ...
- def isposinf(self) -> Tensor: ...
- def isreal(self) -> Tensor: ...
- def istft(self, n_fft: _int, hop_length: Optional[_int]=None, win_length: Optional[_int]=None, window: Optional[Tensor]=None, center: _bool=True, normalized: _bool=False, onesided: Optional[_bool]=None, length: Optional[_int]=None, return_complex: _bool=False) -> Tensor: ...
- def item(self) -> Number: ...
- def kron(self, other: Tensor) -> Tensor: ...
- @overload
- def kthvalue(self, k: _int, dim: _int=-1, keepdim: _bool=False) -> torch.return_types.kthvalue: ...
- @overload
- def kthvalue(self, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.kthvalue: ...
- def lcm(self, other: Tensor) -> Tensor: ...
- def lcm_(self, other: Tensor) -> Tensor: ...
- def ldexp(self, other: Tensor) -> Tensor: ...
- def ldexp_(self, other: Tensor) -> Tensor: ...
- @overload
- def le(self, other: Tensor) -> Tensor: ...
- @overload
- def le(self, other: Number) -> Tensor: ...
- @overload
- def le_(self, other: Tensor) -> Tensor: ...
- @overload
- def le_(self, other: Number) -> Tensor: ...
- @overload
- def lerp(self, end: Tensor, weight: Tensor) -> Tensor: ...
- @overload
- def lerp(self, end: Tensor, weight: Number) -> Tensor: ...
- @overload
- def lerp_(self, end: Tensor, weight: Tensor) -> Tensor: ...
- @overload
- def lerp_(self, end: Tensor, weight: Number) -> Tensor: ...
- @overload
- def less(self, other: Tensor) -> Tensor: ...
- @overload
- def less(self, other: Number) -> Tensor: ...
- @overload
- def less_(self, other: Tensor) -> Tensor: ...
- @overload
- def less_(self, other: Number) -> Tensor: ...
- @overload
- def less_equal(self, other: Tensor) -> Tensor: ...
- @overload
- def less_equal(self, other: Number) -> Tensor: ...
- @overload
- def less_equal_(self, other: Tensor) -> Tensor: ...
- @overload
- def less_equal_(self, other: Number) -> Tensor: ...
- def lgamma(self) -> Tensor: ...
- def lgamma_(self) -> Tensor: ...
- def log(self) -> Tensor: ...
- def log10(self) -> Tensor: ...
- def log10_(self) -> Tensor: ...
- def log1p(self) -> Tensor: ...
- def log1p_(self) -> Tensor: ...
- def log2(self) -> Tensor: ...
- def log2_(self) -> Tensor: ...
- def log_(self) -> Tensor: ...
- def log_normal_(self, mean: _float=1, std: _float=2, *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def log_softmax(self, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def log_softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
- def logaddexp(self, other: Tensor) -> Tensor: ...
- def logaddexp2(self, other: Tensor) -> Tensor: ...
- @overload
- def logcumsumexp(self, dim: _int) -> Tensor: ...
- @overload
- def logcumsumexp(self, dim: Union[str, ellipsis, None]) -> Tensor: ...
- def logdet(self) -> Tensor: ...
- def logical_and(self, other: Tensor) -> Tensor: ...
- def logical_and_(self, other: Tensor) -> Tensor: ...
- def logical_not(self) -> Tensor: ...
- def logical_not_(self) -> Tensor: ...
- def logical_or(self, other: Tensor) -> Tensor: ...
- def logical_or_(self, other: Tensor) -> Tensor: ...
- def logical_xor(self, other: Tensor) -> Tensor: ...
- def logical_xor_(self, other: Tensor) -> Tensor: ...
- def logit(self, eps: Optional[_float]=None) -> Tensor: ...
- def logit_(self, eps: Optional[_float]=None) -> Tensor: ...
- @overload
- def logsumexp(self, dim: Union[_int, _size], keepdim: _bool=False) -> Tensor: ...
- @overload
- def logsumexp(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False) -> Tensor: ...
- def long(self) -> Tensor: ...
- @overload
- def lt(self, other: Tensor) -> Tensor: ...
- @overload
- def lt(self, other: Number) -> Tensor: ...
- @overload
- def lt_(self, other: Tensor) -> Tensor: ...
- @overload
- def lt_(self, other: Number) -> Tensor: ...
- def lu_solve(self, LU_data: Tensor, LU_pivots: Tensor) -> Tensor: ...
- def map2_(self, x: Tensor, y: Tensor, callable: Callable) -> Tensor: ...
- def map_(self, tensor: Tensor, callable: Callable) -> Tensor: ...
- @overload
- def masked_fill(self, mask: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def masked_fill(self, mask: Tensor, value: Number) -> Tensor: ...
- @overload
- def masked_fill_(self, mask: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def masked_fill_(self, mask: Tensor, value: Number) -> Tensor: ...
- def masked_scatter(self, mask: Tensor, source: Tensor) -> Tensor: ...
- def masked_scatter_(self, mask: Tensor, source: Tensor) -> Tensor: ...
- def masked_select(self, mask: Tensor) -> Tensor: ...
- def matmul(self, other: Tensor) -> Tensor: ...
- def matrix_exp(self) -> Tensor: ...
- def matrix_power(self, n: _int) -> Tensor: ...
- @overload
- def max(self) -> Tensor: ...
- @overload
- def max(self, other: Tensor) -> Tensor: ...
- @overload
- def max(self, dim: _int, keepdim: _bool=False) -> torch.return_types.max: ...
- @overload
- def max(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.max: ...
- def maximum(self, other: Tensor) -> Tensor: ...
- @overload
- def mean(self, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def mean(self, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def mean(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def median(self) -> Tensor: ...
- @overload
- def median(self, dim: _int, keepdim: _bool=False) -> torch.return_types.median: ...
- @overload
- def median(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.median: ...
- @overload
- def min(self) -> Tensor: ...
- @overload
- def min(self, other: Tensor) -> Tensor: ...
- @overload
- def min(self, dim: _int, keepdim: _bool=False) -> torch.return_types.min: ...
- @overload
- def min(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.min: ...
- def minimum(self, other: Tensor) -> Tensor: ...
- def mm(self, mat2: Tensor) -> Tensor: ...
- @overload
- def mode(self, dim: _int=-1, keepdim: _bool=False) -> torch.return_types.mode: ...
- @overload
- def mode(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.mode: ...
- @overload
- def moveaxis(self, source: _int, destination: _int) -> Tensor: ...
- @overload
- def moveaxis(self, source: _size, destination: _size) -> Tensor: ...
- @overload
- def movedim(self, source: _int, destination: _int) -> Tensor: ...
- @overload
- def movedim(self, source: _size, destination: _size) -> Tensor: ...
- def msort(self) -> Tensor: ...
- def mul(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor]=None) -> Tensor: ...
- def mul_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor: ...
- def multinomial(self, num_samples: _int, replacement: _bool=False, *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def multiply(self, other: Tensor) -> Tensor: ...
- @overload
- def multiply(self, other: Number) -> Tensor: ...
- @overload
- def multiply_(self, other: Tensor) -> Tensor: ...
- @overload
- def multiply_(self, other: Number) -> Tensor: ...
- def mv(self, vec: Tensor) -> Tensor: ...
- def mvlgamma(self, p: _int) -> Tensor: ...
- def mvlgamma_(self, p: _int) -> Tensor: ...
- def nan_to_num(self, nan: Optional[_float]=None, posinf: Optional[_float]=None, neginf: Optional[_float]=None) -> Tensor: ...
- def nan_to_num_(self, nan: Optional[_float]=None, posinf: Optional[_float]=None, neginf: Optional[_float]=None) -> Tensor: ...
- def nanmean(self, dim: Optional[Union[_int, _size]]=None, keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def nanmedian(self) -> Tensor: ...
- @overload
- def nanmedian(self, dim: _int, keepdim: _bool=False) -> torch.return_types.nanmedian: ...
- @overload
- def nanmedian(self, dim: Union[str, ellipsis, None], keepdim: _bool=False) -> torch.return_types.nanmedian: ...
- @overload
- def nanquantile(self, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear") -> Tensor: ...
- @overload
- def nanquantile(self, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear") -> Tensor: ...
- def nansum(self, dim: Optional[Union[_int, _size]]=None, keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def narrow(self, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: ...
- @overload
- def narrow(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ...
- def narrow_copy(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ...
- def ndimension(self) -> _int: ...
- @overload
- def ne(self, other: Tensor) -> Tensor: ...
- @overload
- def ne(self, other: Number) -> Tensor: ...
- @overload
- def ne_(self, other: Tensor) -> Tensor: ...
- @overload
- def ne_(self, other: Number) -> Tensor: ...
- def neg(self) -> Tensor: ...
- def neg_(self) -> Tensor: ...
- def negative(self) -> Tensor: ...
- def negative_(self) -> Tensor: ...
- def nelement(self) -> _int: ...
- @overload
- def new(self, *args: Any, device: Device=None) -> Tensor: ...
- @overload
- def new(self, storage: Storage) -> Tensor: ...
- @overload
- def new(self, other: Tensor) -> Tensor: ...
- @overload
- def new(self, size: _size, *, device: Device=None) -> Tensor: ...
- @overload
- def new_empty(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def new_empty(self, *size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def new_empty_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def new_full(self, size: Sequence[Union[_int, SymInt]], fill_value: Number, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def new_ones(self, size: _size, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def new_ones(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def new_ones(self, *size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def new_tensor(self, data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor: ...
- @overload
- def new_zeros(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- @overload
- def new_zeros(self, *size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
- def nextafter(self, other: Tensor) -> Tensor: ...
- def nextafter_(self, other: Tensor) -> Tensor: ...
- @overload
- def nonzero(self, *, as_tuple: Literal[False]=False) -> Tensor: ...
- @overload
- def nonzero(self, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ...
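- # Usage sketch (comment only): as_tuple switches between one (N, ndim) index
- # matrix and a per-dimension tuple suitable for advanced indexing.
- #   t = torch.tensor([[0, 1], [2, 0]])
- #   t.nonzero()                     # tensor([[0, 1], [1, 0]])
- #   t.nonzero(as_tuple=True)        # (tensor([0, 1]), tensor([1, 0]))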
- def normal_(self, mean: _float=0, std: _float=1, *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def not_equal(self, other: Tensor) -> Tensor: ...
- @overload
- def not_equal(self, other: Number) -> Tensor: ...
- @overload
- def not_equal_(self, other: Tensor) -> Tensor: ...
- @overload
- def not_equal_(self, other: Number) -> Tensor: ...
- def numel(self) -> _int: ...
- def numpy(self, *, force: _bool=False) -> Any: ...
- def orgqr(self, input2: Tensor) -> Tensor: ...
- def ormqr(self, input2: Tensor, input3: Tensor, left: _bool=True, transpose: _bool=False) -> Tensor: ...
- def outer(self, vec2: Tensor) -> Tensor: ...
- @overload
- def permute(self, dims: _size) -> Tensor: ...
- @overload
- def permute(self, *dims: _int) -> Tensor: ...
- def pin_memory(self, device: Optional[Union[_device, str, None]]=None) -> Tensor: ...
- def pinverse(self, rcond: _float=1e-15) -> Tensor: ...
- def polygamma(self, n: _int) -> Tensor: ...
- def polygamma_(self, n: _int) -> Tensor: ...
- def positive(self) -> Tensor: ...
- @overload
- def pow(self, exponent: Tensor) -> Tensor: ...
- @overload
- def pow(self, exponent: Number) -> Tensor: ...
- @overload
- def pow_(self, exponent: Tensor) -> Tensor: ...
- @overload
- def pow_(self, exponent: Number) -> Tensor: ...
- def prelu(self, weight: Tensor) -> Tensor: ...
- @overload
- def prod(self, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def prod(self, dim: _int, keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def prod(self, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- def put(self, index: Tensor, source: Tensor, accumulate: _bool=False) -> Tensor: ...
- def put_(self, index: Tensor, source: Tensor, accumulate: _bool=False) -> Tensor: ...
- def q_per_channel_axis(self) -> _int: ...
- def q_per_channel_scales(self) -> Tensor: ...
- def q_per_channel_zero_points(self) -> Tensor: ...
- def q_scale(self) -> _float: ...
- def q_zero_point(self) -> _int: ...
- def qr(self, some: _bool=True) -> torch.return_types.qr: ...
- def qscheme(self) -> _qscheme: ...
- @overload
- def quantile(self, q: Tensor, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear") -> Tensor: ...
- @overload
- def quantile(self, q: _float, dim: Optional[_int]=None, keepdim: _bool=False, *, interpolation: str="linear") -> Tensor: ...
- def rad2deg(self) -> Tensor: ...
- def rad2deg_(self) -> Tensor: ...
- @overload
- def random_(self, *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def random_(self, from_: _int, to: Optional[_int], *, generator: Optional[Generator]=None) -> Tensor: ...
- @overload
- def random_(self, to: _int, *, generator: Optional[Generator]=None) -> Tensor: ...
- def ravel(self) -> Tensor: ...
- def reciprocal(self) -> Tensor: ...
- def reciprocal_(self) -> Tensor: ...
- def record_stream(self, s: Stream) -> None: ...
- def refine_names(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
- def relu(self) -> Tensor: ...
- def relu_(self) -> Tensor: ...
- @overload
- def remainder(self, other: Tensor) -> Tensor: ...
- @overload
- def remainder(self, other: Number) -> Tensor: ...
- @overload
- def remainder_(self, other: Tensor) -> Tensor: ...
- @overload
- def remainder_(self, other: Number) -> Tensor: ...
- def rename(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ...
- def rename_(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ...
- def renorm(self, p: Number, dim: _int, maxnorm: Number) -> Tensor: ...
- def renorm_(self, p: Number, dim: _int, maxnorm: Number) -> Tensor: ...
- @overload
- def repeat(self, repeats: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- @overload
- def repeat(self, *repeats: _int) -> Tensor: ...
- @overload
- def repeat_interleave(self, repeats: Tensor, dim: Optional[_int]=None, *, output_size: Optional[_int]=None) -> Tensor: ...
- @overload
- def repeat_interleave(self, repeats: Union[_int, SymInt], dim: Optional[_int]=None, *, output_size: Optional[_int]=None) -> Tensor: ...
- def requires_grad_(self, mode: _bool=True) -> Tensor: ...
- @overload
- def reshape(self, shape: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- @overload
- def reshape(self, *shape: _int) -> Tensor: ...
- def reshape_as(self, other: Tensor) -> Tensor: ...
- @overload
- def resize_(self, size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None) -> Tensor: ...
- @overload
- def resize_(self, *size: _int, memory_format: Optional[memory_format]=None) -> Tensor: ...
- def resize_as_(self, the_template: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor: ...
- def resize_as_sparse_(self, the_template: Tensor) -> Tensor: ...
- def resolve_conj(self) -> Tensor: ...
- def resolve_neg(self) -> Tensor: ...
- def retain_grad(self) -> None: ...
- def roll(self, shifts: Union[_int, _size], dims: Union[_int, _size]=()) -> Tensor: ...
- def rot90(self, k: _int=1, dims: _size=(0,1)) -> Tensor: ...
- @overload
- def round(self) -> Tensor: ...
- @overload
- def round(self, *, decimals: _int) -> Tensor: ...
- @overload
- def round_(self) -> Tensor: ...
- @overload
- def round_(self, *, decimals: _int) -> Tensor: ...
- def row_indices(self) -> Tensor: ...
- def rsqrt(self) -> Tensor: ...
- def rsqrt_(self) -> Tensor: ...
- @overload
- def scatter(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ...
- @overload
- def scatter(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor: ...
- @overload
- def scatter(self, dim: _int, index: Tensor, value: Number, *, reduce: str) -> Tensor: ...
- @overload
- def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
- @overload
- def scatter(self, dim: _int, index: Tensor, value: Number) -> Tensor: ...
- @overload
- def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, value: Number) -> Tensor: ...
- @overload
- def scatter_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ...
- @overload
- def scatter_(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor: ...
- @overload
- def scatter_(self, dim: _int, index: Tensor, value: Number, *, reduce: str) -> Tensor: ...
- @overload
- def scatter_(self, dim: _int, index: Tensor, value: Number) -> Tensor: ...
- @overload
- def scatter_add(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ...
- @overload
- def scatter_add(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
- def scatter_add_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: ...
- def scatter_reduce(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool=True) -> Tensor: ...
- def scatter_reduce_(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool=True) -> Tensor: ...
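- # Usage sketch (comment only): scatter_ writes src values at the positions
- # selected by index along dim; scatter_add_/scatter_reduce_ accumulate on
- # collisions instead of overwriting.
- #   dst = torch.zeros(2, 3)
- #   dst.scatter_(1, torch.tensor([[0, 2]]), torch.tensor([[1.0, 2.0]]))
- #   # dst: tensor([[1., 0., 2.], [0., 0., 0.]])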
- @overload
- def select(self, dim: _int, index: Union[_int, SymInt]) -> Tensor: ...
- @overload
- def select(self, dim: Union[str, ellipsis, None], index: _int) -> Tensor: ...
- def select_scatter(self, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ...
- @overload
- def set_(self, storage: Union[Storage, TypedStorage], offset: _int, size: _size, stride: _size) -> Tensor: ...
- @overload
- def set_(self, storage: Union[Storage, TypedStorage]) -> Tensor: ...
- def sgn(self) -> Tensor: ...
- def sgn_(self) -> Tensor: ...
- def short(self) -> Tensor: ...
- def sigmoid(self) -> Tensor: ...
- def sigmoid_(self) -> Tensor: ...
- def sign(self) -> Tensor: ...
- def sign_(self) -> Tensor: ...
- def signbit(self) -> Tensor: ...
- def sin(self) -> Tensor: ...
- def sin_(self) -> Tensor: ...
- def sinc(self) -> Tensor: ...
- def sinc_(self) -> Tensor: ...
- def sinh(self) -> Tensor: ...
- def sinh_(self) -> Tensor: ...
- @overload
- def size(self) -> Size: ...
- @overload
- def size(self, dim: _int) -> _int: ...
- def slice_scatter(self, src: Tensor, dim: _int=0, start: Optional[Union[_int, SymInt]]=None, end: Optional[Union[_int, SymInt]]=None, step: Union[_int, SymInt]=1) -> Tensor: ...
- def slogdet(self) -> torch.return_types.slogdet: ...
- def smm(self, mat2: Tensor) -> Tensor: ...
- @overload
- def softmax(self, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def sort(self, *, stable: Optional[_bool], dim: _int=-1, descending: _bool=False) -> torch.return_types.sort: ...
- @overload
- def sort(self, dim: _int=-1, descending: _bool=False) -> torch.return_types.sort: ...
- @overload
- def sort(self, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool=False) -> torch.return_types.sort: ...
- @overload
- def sort(self, dim: Union[str, ellipsis, None], descending: _bool=False) -> torch.return_types.sort: ...
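- # Example sketch: the keyword-only `stable` overload preserves the relative
- # order of equal elements, which the default sort does not guarantee.
- #   values, indices = torch.tensor([3, 1, 2]).sort()   # ascending
- #   values, indices = t.sort(stable=True, dim=0, descending=True)  # for some tensor t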
- def sparse_dim(self) -> _int: ...
- def sparse_mask(self, mask: Tensor) -> Tensor: ...
- def sparse_resize_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor: ...
- def sparse_resize_and_clear_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor: ...
- @overload
- def split(self, split_size: _int, dim: _int=0) -> Sequence[Tensor]: ...
- @overload
- def split(self, split_size: Tuple[_int, ...], dim: _int=0) -> Sequence[Tensor]: ...
- def split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
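- # Example sketch: split takes a uniform chunk size (the last chunk may be
- # smaller), while split_with_sizes takes explicit sizes that must sum to the
- # dimension's length.
- #   t = torch.arange(10)
- #   t.split(4)                     # chunks of sizes (4, 4, 2)
- #   t.split_with_sizes([2, 3, 5])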
- def sqrt(self) -> Tensor: ...
- def sqrt_(self) -> Tensor: ...
- def square(self) -> Tensor: ...
- def square_(self) -> Tensor: ...
- @overload
- def squeeze(self) -> Tensor: ...
- @overload
- def squeeze(self, dim: _int) -> Tensor: ...
- @overload
- def squeeze(self, dim: _size) -> Tensor: ...
- @overload
- def squeeze(self, *dim: _int) -> Tensor: ...
- @overload
- def squeeze(self, dim: Union[str, ellipsis, None]) -> Tensor: ...
- @overload
- def squeeze_(self) -> Tensor: ...
- @overload
- def squeeze_(self, dim: _int) -> Tensor: ...
- @overload
- def squeeze_(self, dim: _size) -> Tensor: ...
- @overload
- def squeeze_(self, *dim: _int) -> Tensor: ...
- @overload
- def squeeze_(self, dim: Union[str, ellipsis, None]) -> Tensor: ...
- def sspaddmm(self, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
- @overload
- def std(self, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False) -> Tensor: ...
- @overload
- def std(self, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ...
- @overload
- def std(self, unbiased: _bool=True) -> Tensor: ...
- @overload
- def std(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tensor: ...
- @overload
- def std(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ...
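- # Example sketch: `correction` generalizes `unbiased`; correction=1 gives the
- # unbiased (sample) estimator, correction=0 the biased (population) one.
- #   t = torch.randn(4, 3)
- #   t.std(dim=0, correction=0)     # per-column population std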
- def untyped_storage(self) -> Storage: ...
- def storage_offset(self) -> _int: ...
- def storage_type(self) -> Storage: ...
- @overload
- def stride(self) -> Tuple[_int, ...]: ...
- @overload
- def stride(self, dim: _int) -> _int: ...
- def sub(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
- def sub_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, alpha: Optional[Number]=1) -> Tensor: ...
- @overload
- def subtract(self, other: Tensor, *, alpha: Number=1) -> Tensor: ...
- @overload
- def subtract(self, other: Number, alpha: Number=1) -> Tensor: ...
- @overload
- def subtract_(self, other: Tensor, *, alpha: Number=1) -> Tensor: ...
- @overload
- def subtract_(self, other: Number, alpha: Number=1) -> Tensor: ...
- @overload
- def sum(self, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def sum(self, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
- @overload
- def sum(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None) -> Tensor: ...
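- # Example sketch: `dim` selects the axis to reduce, and `keepdim` retains it
- # with size 1 so the result stays broadcastable against the input.
- #   m = torch.ones(2, 3)
- #   m.sum(dim=0)                   # shape (3,)
- #   m.sum(dim=0, keepdim=True)     # shape (1, 3)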
- @overload
- def sum_to_size(self, size: _size) -> Tensor: ...
- @overload
- def sum_to_size(self, *size: _int) -> Tensor: ...
- def svd(self, some: _bool=True, compute_uv: _bool=True) -> torch.return_types.svd: ...
- def swapaxes(self, axis0: _int, axis1: _int) -> Tensor: ...
- def swapaxes_(self, axis0: _int, axis1: _int) -> Tensor: ...
- def swapdims(self, dim0: _int, dim1: _int) -> Tensor: ...
- def swapdims_(self, dim0: _int, dim1: _int) -> Tensor: ...
- def t(self) -> Tensor: ...
- def t_(self) -> Tensor: ...
- def take(self, index: Tensor) -> Tensor: ...
- def take_along_dim(self, indices: Tensor, dim: Optional[_int]=None) -> Tensor: ...
- def tan(self) -> Tensor: ...
- def tan_(self) -> Tensor: ...
- def tanh(self) -> Tensor: ...
- def tanh_(self) -> Tensor: ...
- @overload
- def tensor_split(self, indices: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
- @overload
- def tensor_split(self, tensor_indices_or_sections: Tensor, dim: _int=0) -> List[Tensor]: ...
- @overload
- def tensor_split(self, sections: Union[_int, SymInt], dim: _int=0) -> List[Tensor]: ...
- @overload
- def tile(self, dims: _size) -> Tensor: ...
- @overload
- def tile(self, *dims: _int) -> Tensor: ...
- @overload
- def to(self, dtype: _dtype, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ...
- @overload
- def to(self, device: Optional[Union[_device, str]]=None, dtype: Optional[_dtype]=None, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ...
- @overload
- def to(self, other: Tensor, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ...
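- # Example sketch: the three overloads convert dtype, move device (optionally
- # also changing dtype), or copy another tensor's dtype and device.
- #   t.to(torch.float16)
- #   t.to("cuda:0", non_blocking=True)
- #   t.to(other)                    # match other's device and dtype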
- def to_dense(self, dtype: Optional[_dtype]=None) -> Tensor: ...
- def to_mkldnn(self, dtype: Optional[_dtype]=None) -> Tensor: ...
- def to_padded_tensor(self, padding: _float, output_size: Optional[Sequence[Union[_int, SymInt]]]=None) -> Tensor: ...
- @overload
- def to_sparse(self, *, layout: Optional[_layout]=None, blocksize: Optional[Union[_int, _size]]=None, dense_dim: Optional[_int]=None) -> Tensor: ...
- @overload
- def to_sparse(self, sparse_dim: _int) -> Tensor: ...
- def to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int]=None) -> Tensor: ...
- def to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int]=None) -> Tensor: ...
- def to_sparse_csc(self, dense_dim: Optional[_int]=None) -> Tensor: ...
- def to_sparse_csr(self, dense_dim: Optional[_int]=None) -> Tensor: ...
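- # Example sketch: conversions to the sparse layouts declared above.
- #   d = torch.eye(3)
- #   coo = d.to_sparse()            # COO by default
- #   csr = d.to_sparse_csr()
- #   bsr = d.to_sparse_bsr((1, 1))  # blocksize must divide the dims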
- def tolist(self) -> List: ...
- def topk(self, k: _int, dim: _int=-1, largest: _bool=True, sorted: _bool=True) -> torch.return_types.topk: ...
- def trace(self) -> Tensor: ...
- @overload
- def transpose(self, dim0: _int, dim1: _int) -> Tensor: ...
- @overload
- def transpose(self, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: ...
- def transpose_(self, dim0: _int, dim1: _int) -> Tensor: ...
- def triangular_solve(self, A: Tensor, upper: _bool=True, transpose: _bool=False, unitriangular: _bool=False) -> torch.return_types.triangular_solve: ...
- def tril(self, diagonal: _int=0) -> Tensor: ...
- def tril_(self, diagonal: _int=0) -> Tensor: ...
- def triu(self, diagonal: _int=0) -> Tensor: ...
- def triu_(self, diagonal: _int=0) -> Tensor: ...
- def true_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor]=None) -> Tensor: ...
- def true_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor: ...
- def trunc(self) -> Tensor: ...
- def trunc_(self) -> Tensor: ...
- @overload
- def type(self, dtype: None=None, non_blocking: _bool=False) -> str: ...
- @overload
- def type(self, dtype: Union[str, _dtype], non_blocking: _bool=False) -> Tensor: ...
- def type_as(self, other: Tensor) -> Tensor: ...
- @overload
- def unbind(self, dim: _int=0) -> List[Tensor]: ...
- @overload
- def unbind(self, dim: Union[str, ellipsis, None]) -> List[Tensor]: ...
- @overload
- def unflatten(self, dim: Union[str, ellipsis, None], sizes: _size, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
- @overload
- def unflatten(self, dim: _int, sizes: _size) -> Tensor: ...
- def unfold(self, dimension: _int, size: _int, step: _int) -> Tensor: ...
- def uniform_(self, from_: _float=0, to: _float=1, *, generator: Optional[Generator]=None) -> Tensor: ...
- def unsafe_chunk(self, chunks: _int, dim: _int=0) -> List[Tensor]: ...
- def unsafe_split(self, split_size: Union[_int, SymInt], dim: _int=0) -> List[Tensor]: ...
- def unsafe_split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int=0) -> List[Tensor]: ...
- def unsqueeze(self, dim: _int) -> Tensor: ...
- def unsqueeze_(self, dim: _int) -> Tensor: ...
- def values(self) -> Tensor: ...
- @overload
- def var(self, dim: Optional[Union[_int, _size]], unbiased: _bool=True, keepdim: _bool=False) -> Tensor: ...
- @overload
- def var(self, dim: Optional[Union[_int, _size]]=None, *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ...
- @overload
- def var(self, unbiased: _bool=True) -> Tensor: ...
- @overload
- def var(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool=True, keepdim: _bool=False) -> Tensor: ...
- @overload
- def var(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[_int]=None, keepdim: _bool=False) -> Tensor: ...
- def vdot(self, other: Tensor) -> Tensor: ...
- @overload
- def view(self, dtype: _dtype) -> Tensor: ...
- @overload
- def view(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- @overload
- def view(self, *size: _int) -> Tensor: ...
- def view_as(self, other: Tensor) -> Tensor: ...
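- # Example sketch: view reshapes without copying (the layout must be
- # compatible), and the dtype overload reinterprets the underlying bytes
- # rather than converting values.
- #   t = torch.arange(6)
- #   t.view(2, 3)
- #   torch.zeros(4, dtype=torch.int32).view(torch.float32)  # same itemsize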
- @overload
- def vsplit(self, sections: _int) -> List[Tensor]: ...
- @overload
- def vsplit(self, indices: _size) -> List[Tensor]: ...
- @overload
- def vsplit(self, *indices: _int) -> List[Tensor]: ...
- @overload
- def where(self, condition: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def where(self, condition: Tensor, other: Number) -> Tensor: ...
- @overload
- def xlogy(self, other: Tensor) -> Tensor: ...
- @overload
- def xlogy(self, other: Number) -> Tensor: ...
- @overload
- def xlogy_(self, other: Tensor) -> Tensor: ...
- @overload
- def xlogy_(self, other: Number) -> Tensor: ...
- def zero_(self) -> Tensor: ...
- # Defined in torch/csrc/multiprocessing/init.cpp
- def _multiprocessing_init() -> None: ...
- # Defined in torch/csrc/mps/Module.cpp
- def _mps_synchronize() -> None: ...
- def _mps_get_default_generator() -> Generator: ...
- def _mps_emptyCache() -> None: ...
- def _mps_setMemoryFraction(fraction: _float) -> None: ...
- def _mps_currentAllocatedMemory() -> _int: ...
- def _mps_driverAllocatedMemory() -> _int: ...
- def _mps_is_available() -> _bool: ...
- def _mps_is_on_macos_13_or_newer() -> _bool: ...
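- # Example sketch: these MPS bindings are private; the public entry points
- # live under torch.backends.mps (availability varies by build and OS).
- #   if torch.backends.mps.is_available():
- #       x = torch.ones(2, device="mps")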
- # Defined in torch/csrc/cuda/Module.cpp
- def _cuda_getCurrentStream(device: _int) -> Tuple: ...
- def _cuda_getCurrentRawStream(device: _int) -> _int: ...
- def _cuda_getDefaultStream(device: _int) -> Tuple: ...
- def _cuda_getCurrentBlasHandle() -> _int: ...
- def _cuda_clearCublasWorkspaces() -> None: ...
- def _cuda_setDevice(device: _int) -> None: ...
- def _cuda_exchangeDevice(device: _int) -> _int: ...
- def _cuda_maybeExchangeDevice(device: _int) -> _int: ...
- def _cuda_getDevice() -> _int: ...
- def _cuda_getDeviceCount() -> _int: ...
- def _cuda_set_sync_debug_mode(warn_level: Union[_int, str]) -> None: ...
- def _cuda_get_sync_debug_mode() -> _int: ...
- def _cuda_sleep(cycles: _int) -> None: ...
- def _cuda_synchronize() -> None: ...
- def _cuda_ipc_collect() -> None: ...
- def _cuda_getArchFlags() -> Optional[str]: ...
- def _cuda_init() -> None: ...
- def _cuda_setStream(stream_id: _int, device_index: _int, device_type: _int) -> None: ...
- def _cuda_getCompiledVersion() -> _int: ...
- def _cuda_cudaHostAllocator() -> _int: ...
- def _cuda_cudaCachingAllocator_raw_alloc(size: _int, cuda_stream: _int) -> _int: ...
- def _cuda_cudaCachingAllocator_raw_delete(ptr: _int) -> None: ...
- def _cuda_cudaCachingAllocator_set_allocator_settings(env: str) -> None: ...
- def _cuda_setMemoryFraction(fraction: _float, device: _int) -> None: ...
- def _cuda_emptyCache() -> None: ...
- def _cuda_memoryStats(device: _int) -> Dict[str, Any]: ...
- def _cuda_resetAccumulatedMemoryStats(device: _int) -> None: ...
- def _cuda_resetPeakMemoryStats(device: _int) -> None: ...
- def _cuda_memorySnapshot() -> Dict[str, Any]: ...
- def _cuda_recordMemoryHistory(enabled: _bool, record_context: _bool, record_context_cpp: _bool, alloc_trace_max_entries: _int, alloc_trace_record_context: _bool) -> None: ...
- def _cuda_getAllocatorBackend() -> str: ...
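- # Example sketch: these are the private bindings behind the public
- # torch.cuda memory API (names of the wrappers, not of the bindings, shown).
- #   stats = torch.cuda.memory_stats(0)   # dict derived from _cuda_memoryStats
- #   torch.cuda.memory_allocated(0)
- #   torch.cuda.empty_cache()             # roughly _cuda_emptyCache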
- class _cuda_CUDAAllocator:
- ...
- def _cuda_customAllocator(alloc_fn: _int, free_fn: _int) -> _cuda_CUDAAllocator: ...
- def _cuda_changeCurrentAllocator(allocator: _cuda_CUDAAllocator) -> None: ...
- def _cuda_getAllocator() -> _cuda_CUDAAllocator: ...
- def _cuda_lock_mutex() -> None: ...
- def _cuda_unlock_mutex() -> None: ...
- def _cuda_canDeviceAccessPeer(device: _int, peer_device: _int) -> _bool: ...
- def _cuda_jiterator_compile_and_launch_kernel(code_string: str,
- kernel_name: str,
- return_by_ref: _bool,
- num_outputs: _int,
- tensors: Tuple,
- kwargs: Dict[str, Union[_int, _float, _bool]]) -> Tensor: ...
- def _cuda_get_cudnn_benchmark_limit() -> _int: ...
- def _cuda_set_cudnn_benchmark_limit(arg: _int) -> None: ...
- def _nccl_version() -> _int: ...
- def _nccl_unique_id() -> bytes: ...
- def _nccl_init_rank(nranks: _int, comm_id: bytes, rank: _int) -> object: ...
- def _nccl_reduce(input: Sequence[Tensor],
- output: Tensor,
- root: _int,
- op: _int,
- streams: Optional[Sequence[_CudaStreamBase]],
- comms: Optional[Sequence[object]]) -> None: ...
- def _nccl_all_reduce(input: Sequence[Tensor],
- output: Sequence[Tensor],
- op: _int,
- streams: Optional[Sequence[_CudaStreamBase]],
- comms: Optional[Sequence[object]]) -> None: ...
- def _nccl_broadcast(input: Sequence[Tensor],
- root: _int,
- streams: Optional[Sequence[_CudaStreamBase]],
- comms: Optional[Sequence[object]]) -> None: ...
- def _nccl_all_gather(input: Sequence[Tensor],
- output: Sequence[Tensor],
- streams: Optional[Sequence[_CudaStreamBase]],
- comms: Optional[Sequence[object]]) -> None: ...
- def _nccl_reduce_scatter(input: Sequence[Tensor],
- output: Sequence[Tensor],
- op: _int,
- streams: Optional[Sequence[_CudaStreamBase]],
- comms: Optional[Sequence[object]]) -> None: ...
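- # Example sketch: thin public wrappers for these bindings live in
- # torch.cuda.nccl; most users go through torch.distributed instead.
- #   tensors = [torch.ones(4, device=f"cuda:{i}") for i in range(2)]
- #   torch.cuda.nccl.all_reduce(tensors)  # in-place sum across devices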
- def _rocm_is_backward_pass() -> _bool: ...
- class _CudaDeviceProperties:
- name: str
- major: _int
- minor: _int
- multi_processor_count: _int
- total_memory: _int
- is_integrated: _int
- is_multi_gpu_board: _int
- # Defined in torch/csrc/cuda/python_comm.cpp
- def _broadcast(tensor: Tensor, devices: List[_int]) -> List[Tensor]: ...
- def _broadcast_out(tensor: Tensor, out_tensors: List[Tensor]) -> List[Tensor]: ...
- def _broadcast_coalesced(
- tensors: List[Tensor],
- devices: List[_int],
- buffer_size: _int
- ) -> List[List[Tensor]]: ...
- def _scatter(tensor: Tensor, devices: List[_int], chunk_sizes: Optional[List[_int]], dim: _int, streams: Optional[List[Stream]]) -> List[Tensor]: ...
- def _scatter_out(tensor: Tensor, out_tensors: List[Tensor], dim: _int, streams: Optional[List[Stream]]) -> List[Tensor]: ...
- def _gather(tensors: List[Tensor], dim: _int, destination_index: Optional[_int]) -> Tensor: ...
- def _gather_out(tensors: List[Tensor], out_tensor: Tensor, dim: _int) -> Tensor: ...
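- # Example sketch: public counterparts live in torch.cuda.comm.
- #   ts = torch.cuda.comm.broadcast(torch.ones(4), devices=[0, 1])
- #   out = torch.cuda.comm.gather(ts, dim=0, destination=0)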
- # Defined in torch/csrc/cuda/Stream.cpp
- class _CudaStreamBase:
- stream_id: _int
- device_index: _int
- device_type: _int
- device: _device
- cuda_stream: _int
- priority: _int
- def __new__(cls, priority: _int = 0, stream_id: _int = 0, device_index: _int = 0, stream_ptr: _int = 0) -> _CudaStreamBase: ...
- def query(self) -> _bool: ...
- def synchronize(self) -> None: ...
- def priority_range(self) -> Tuple[_int, _int]: ...
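- # Example sketch: torch.cuda.Stream builds on this base; work issued inside a
- # torch.cuda.stream(...) context runs on that stream.
- #   s = torch.cuda.Stream()
- #   with torch.cuda.stream(s):
- #       y = x * 2                  # for some CUDA tensor x
- #   s.synchronize()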
- # Defined in torch/csrc/cuda/Event.cpp
- class _CudaEventBase:
- device: _device
- cuda_event: _int
- def __new__(cls, enable_timing: _bool = False, blocking: _bool = False, interprocess: _bool = False) -> _CudaEventBase: ...
- @classmethod
- def from_ipc_handle(cls, device: _device, ipc_handle: bytes) -> _CudaEventBase: ...
- def record(self, stream: _CudaStreamBase) -> None: ...
- def wait(self, stream: _CudaStreamBase) -> None: ...
- def query(self) -> _bool: ...
- def elapsed_time(self, other: _CudaEventBase) -> _float: ...
- def synchronize(self) -> None: ...
- def ipc_handle(self) -> bytes: ...
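- # Example sketch: torch.cuda.Event builds on this base and is the usual way
- # to time GPU work.
- #   start = torch.cuda.Event(enable_timing=True)
- #   end = torch.cuda.Event(enable_timing=True)
- #   start.record(); y = x @ x; end.record()   # for some CUDA tensor x
- #   torch.cuda.synchronize()
- #   ms = start.elapsed_time(end)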
- # Defined in torch/csrc/cuda/Graph.cpp
- class _CUDAGraph:
- def capture_begin(self,
- pool: Optional[Tuple[_int, _int]]=...) -> None: ...
- def capture_end(self) -> None: ...
- def replay(self) -> None: ...
- def reset(self) -> None: ...
- def pool(self) -> Tuple[_int, _int]: ...
- def enable_debug_mode(self) -> None: ...
- def debug_dump(self,
- debug_path: str) -> None: ...
- def _cuda_isCurrentStreamCapturing() -> _bool: ...
- def _graph_pool_handle() -> Tuple[_int, _int]: ...
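- # Example sketch: the public torch.cuda.CUDAGraph wraps _CUDAGraph; capture
- # inside the torch.cuda.graph context manager, then copy fresh inputs into
- # the captured (static) tensors and replay.
- #   g = torch.cuda.CUDAGraph()
- #   static_in = torch.zeros(8, device="cuda")
- #   with torch.cuda.graph(g):
- #       static_out = static_in * 2
- #   static_in.copy_(torch.arange(8.0, device="cuda"))
- #   g.replay()                     # static_out now holds static_in * 2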
- # Defined in torch/csrc/DataLoader.cpp
- def _set_worker_signal_handlers(*arg: Any) -> None: ... # THPModule_setWorkerSignalHandlers
- def _set_worker_pids(key: _int, child_pids: Tuple[_int, ...]) -> None: ... # THPModule_setWorkerPIDs
- def _remove_worker_pids(loader_id: _int) -> None: ... # THPModule_removeWorkerPIDs
- def _error_if_any_worker_fails() -> None: ... # THPModule_errorIfAnyWorkerFails
- # Defined in torch/csrc/jit/python/python_tracer.cpp
- class TracingState:
- def push_scope(self, scope_name: str) -> None: ...
- def pop_scope(self) -> None: ...
- def current_scope(self) -> str: ...
- def set_graph(self, graph: Graph) -> None: ...
- def graph(self) -> Graph: ...
- def _create_graph_by_tracing(
- func: Callable[..., Any],
- inputs: Any,
- var_name_lookup_fn: Callable[[Tensor], str],
- strict: Any,
- force_outplace: Any,
- self: Any = None,
- argument_names: List[str] = ...
- ) -> Tuple[Graph, Stack]: ...
- def _tracer_warn_use_python() -> None: ...
- def _get_tracing_state() -> TracingState: ...
- # Defined in torch/csrc/jit/python/python_ir.cpp
- # Not actually defined in python_ir.cpp; the exact definition site is unclear.
- class IValue:
- ...
- Stack = List[IValue]
- class JitType:
- annotation_str: str
- def isSubtypeOf(self, other: JitType) -> _bool: ...
- def with_dtype(self, dtype: _dtype) -> JitType: ...
- def with_sizes(self, sizes: List[Optional[_int]]) -> JitType: ...
- def kind(self) -> str: ...
- def scalarType(self) -> Optional[str]: ...
- def getElementType(self) -> JitType: ...
- def dtype(self) -> Optional[_dtype]: ...
- class InferredType:
- def __init__(self, arg: Union[JitType, str]): ...
- def type(self) -> JitType: ...
- def success(self) -> _bool: ...
- def reason(self) -> str: ...
- R = TypeVar('R', bound=JitType)
- class AnyType(JitType):
- @staticmethod
- def get() -> AnyType: ...
- class NoneType(JitType):
- @staticmethod
- def get() -> NoneType: ...
- class BoolType(JitType):
- @staticmethod
- def get() -> BoolType: ...
- class FloatType(JitType):
- @staticmethod
- def get() -> FloatType: ...
- class ComplexType(JitType):
- @staticmethod
- def get() -> ComplexType: ...
- class IntType(JitType):
- @staticmethod
- def get() -> IntType: ...
- class NumberType(JitType):
- @staticmethod
- def get() -> NumberType: ...
- class StringType(JitType):
- @staticmethod
- def get() -> StringType: ...
- class DeviceObjType(JitType):
- @staticmethod
- def get() -> DeviceObjType: ...
- class StreamObjType(JitType):
- @staticmethod
- def get() -> StreamObjType: ...
- class ListType(JitType):
- def __init__(self, a: JitType) -> None: ...
- def getElementType(self) -> JitType: ...
- @staticmethod
- def ofInts() -> ListType: ...
- @staticmethod
- def ofTensors() -> ListType: ...
- @staticmethod
- def ofFloats() -> ListType: ...
- @staticmethod
- def ofComplexDoubles() -> ListType: ...
- @staticmethod
- def ofBools() -> ListType: ...
- @staticmethod
- def ofStrings() -> ListType: ...
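- # Example sketch: JIT types compose and can be queried for subtyping, per the
- # declarations above.
- #   int_list = torch._C.ListType.ofInts()
- #   same = torch._C.ListType(torch._C.IntType.get())
- #   torch._C.IntType.get().isSubtypeOf(torch._C.NumberType.get())  # True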
- class DictType(JitType):
- def __init__(self, key: JitType, value: JitType) -> None: ...
- def getKeyType(self) -> JitType: ...
- def getValueType(self) -> JitType: ...
- class TupleType(JitType):
- def __init__(self, a: List[Optional[JitType]]) -> None: ...
- def elements(self) -> List[JitType]: ...
- class UnionType(JitType):
- def __init__(self, a: List[JitType]) -> None: ...
- class ClassType(JitType):
- def __init__(self, qualified_name: str) -> None: ...
- class InterfaceType(JitType):
- def __init__(self, qualified_name: str) -> None: ...
- def getMethod(self, name: str) -> Optional[FunctionSchema]: ...
- def getMethodNames(self) -> List[str]: ...
- class OptionalType(JitType, Generic[R]):
- def __init__(self, a: JitType) -> None: ...
- def getElementType(self) -> JitType: ...
- @staticmethod
- def ofTensor() -> OptionalType: ...
- class FutureType(JitType):
- def __init__(self, a: JitType) -> None: ...
- def getElementType(self) -> JitType: ...
- class AwaitType(JitType):
- def __init__(self, a: JitType) -> None: ...
- def getElementType(self) -> JitType: ...
- class RRefType(JitType):
- def __init__(self, a: JitType) -> None: ...
- class EnumType(JitType):
- def __init__(
- self,
- qualified_name: str,
- value_type: JitType,
- enum_names_values: List[Any]
- ) -> None:
- ...
- class TensorType(JitType):
- @classmethod
- def get(cls) -> TensorType: ...
- @classmethod
- def getInferred(cls) -> TensorType: ...
- def with_sizes(self, other: Optional[List[Optional[_int]]]) -> TensorType: ...
- def sizes(self) -> Optional[List[_int]]: ...
- def varyingSizes(self) -> Optional[List[Optional[_int]]]: ...
- def strides(self) -> Optional[List[_int]]: ...
- def device(self) -> Optional[_device]: ...
- def dim(self) -> _int: ...
- def dtype(self) -> Optional[_dtype]: ...
- @staticmethod
- def create_from_tensor(t: Tensor) -> TensorType: ...
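- # Example sketch: create_from_tensor captures the concrete shape, dtype, and
- # device of an existing tensor.
- #   tt = torch._C.TensorType.create_from_tensor(torch.ones(2, 3))
- #   tt.sizes()                     # [2, 3]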
- # Defined in torch/csrc/jit/python/python_tree_views.cpp
- class SourceRange:
- ...
- class TreeView:
- ...
- class Ident(TreeView):
- @property
- def name(self) -> str: ...
- class ClassDef(TreeView):
- ...
- class Def(TreeView):
- def name(self) -> Ident: ...
- class Decl(TreeView):
- ...
- # Defined in torch/csrc/distributed/rpc/init.cpp
- def _rpc_init() -> _bool: ...
- # Defined in torch/csrc/distributed/autograd/init.cpp
- def _dist_autograd_init() -> _bool: ...
- # Defined in torch/csrc/distributed/c10d/init.cpp
- def _c10d_init() -> _bool: ...
- # Defined in torch/csrc/distributed/rpc/testing/init.cpp
- def _faulty_agent_init() -> _bool: ...
- def _enable_minidumps(directory: str) -> None: ...
- def _disable_minidumps() -> None: ...
- def _enable_minidumps_on_exceptions() -> None: ...
- def _register_py_class_for_device(device: str, cls: Any) -> None: ...
- def _activate_cuda_trace() -> None: ...
- # Defined in torch/csrc/Module.cpp
- def _current_graph_task_id() -> _int: ...
- def _current_autograd_node() -> _Node: ...
- class _OutOfMemoryError(RuntimeError):
- pass
- class _DistBackendError(RuntimeError):
- pass
|